from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is hidden by default for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
%matplotlib inline
%load_ext autoreload
from IPython.display import display
# from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pickle
from prettytable import PrettyTable #sudo pip install prettytable
from utils import *
def choid(x):
return x
The original text file of Shakespeare's Macbeth was downloaded from Project Gutenberg and modified in such a way that all names of characters (from now on called "actors," in the social-network sense) appear in a uniform form.
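Purely as an illustration of the kind of normalization involved (the regular expression, alias table, and helper below are hypothetical; the modified corpus file was prepared beforehand and is simply loaded in the next cell), abbreviated speech headings such as "Macb." can be mapped to a single canonical actor name:
# Hypothetical sketch of the name normalization applied to the Project Gutenberg text.
import re
canonical = {'Macb': 'Macbeth', 'Lady M': 'Lady Macbeth', 'Banq': 'Banquo'}  # assumed alias table
def normalize_speaker(line):
    # Match an abbreviated speaker heading at the start of a speech, e.g. "  Macb. ..."
    m = re.match(r'^\s*([A-Z][\w ]+)\.', line)
    if not m:
        return line
    name = m.group(1).strip()
    return line.replace(name, canonical.get(name, name), 1)
print normalize_speaker('  Macb. So foul and fair a day I have not seen.')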
%autoreload 2
from testy2 import *
from syntheticThreeLayerGraph_time import synthetic_multi_level_dict,plot_graph_dict,plot_graph_k_n, plot_total_graph_with_weights
fileName = 'corpora/ShakespeareMacbethOrig_from_PG_mod.txt'
f=open(fileName,'r')
attributes_filename='corpora/Character Atrributes - Macbeth - 1.csv'
act_dict,u,pers_l,pers_dict,pact,lact,scen_dict,lscen,pscen=create_dict_of_acts(fileName)
print 'Actors appearing in Macbeth:'
print
for actor in pers_l:
print actor
print
print 'The number of actors in Macbeth is', len(pers_l)
print
print 'Attributes of actors in Macbeth:'
print
attribute_dict={}
af=open(attributes_filename,'r')
lis_pret=['Name','Gender','Social Status','Alliance','Drive','Count of speech characters']
pret=PrettyTable(lis_pret)
pret.padding_width=1
# print
# print 'Names not associated in attributes file'
list_of_attributes_dic={}
list_of_att=[]
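# Parse the attributes CSV: the header row supplies the attribute names, each
# subsequent row is matched to an actor found in the play text, and the
# 'Count of speech characters' total (momo) is appended as the last column.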
for lin in af:
ll=lin.strip().split(',')
momo=0
if ll[0]=='Name':
list_of_att={il:l.strip() for il,l in enumerate(ll)}
continue
try:
for k,l in pers_dict[ll[0]].items():
momo+= len(l)
except:
momo=0
ll.append(momo)
pret.add_row(ll)
if ll[0].strip() in pers_l:
# print ll
for en,vv in enumerate(ll[:-1]):
# print en,vv
if list_of_att[en] not in list_of_attributes_dic:
list_of_attributes_dic[list_of_att[en]]=set()
list_of_attributes_dic[list_of_att[en]].add(ll[en].strip())
else:
list_of_attributes_dic[list_of_att[en]].add(ll[en].strip())
attribute_dict[ll[0].strip()]=(ll[1].strip(),ll[2].strip(),ll[3].strip(),ll[4].strip(),momo)
else:
print ll,'(name not matched to any actor in the play text)'
# print
# print 'Atributes'
print pret
fop=open('list_of_attributes_dic.dmpp','w')
pickle.dump(list_of_attributes_dic,fop)
fop.close()
fop=open('attribute_dict.dmpp','w')
pickle.dump(attribute_dict,fop)
fop.close()
fop=open('list_of_att.dmpp','w')
pickle.dump(list_of_att,fop)
fop.close()
The relationship (tie) among actors (detected automatically here) is the so-called "conversational relationship," which is defined whenever two actors co-participate in a conversation. Due to the structure of Shakespeare's printed text (taken from Project Gutenberg), the unit of conversation (i.e., a conversational chunk) used here is the body of text delimited between two empty lines.
In this way, the resulting network is represented by a weighted undirected graph, where the weight of an edge (tie) joining two actors is the total number of conversational chunks in which these actors participate together. Moreover, we decompose the network into slices (or layers) according either to one of the five Acts or to one of the 26 Scenes in which the measured conversations take place.
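The following is a minimal sketch of this construction (cooccurrence_graph and its speaker-detection rule are hypothetical illustrations, not the actual code in create_graph_dict): conversational chunks are read off as blocks of text separated by blank lines, and every pair of actors speaking inside the same chunk gets a weighted edge.
from itertools import combinations
import networkx as nx
def cooccurrence_graph(lines, known_actors):
    # Split the play text into conversational chunks at empty lines and add a
    # weighted edge for every pair of actors speaking within the same chunk.
    G = nx.Graph()
    chunk = set()
    for line in list(lines) + ['']:      # the trailing '' flushes the last chunk
        if line.strip():
            for actor in known_actors:   # hypothetical speaker-detection rule
                if line.startswith(actor + '.'):
                    chunk.add(actor)
        else:
            for a, b in combinations(sorted(chunk), 2):
                w = G.get_edge_data(a, b, {'weight': 0})['weight']
                G.add_edge(a, b, weight=w + 1)
            chunk = set()
    return G
For instance, cooccurrence_graph(open(fileName).read().splitlines(), pers_l) would give a weighted graph comparable in spirit to the per-Act networks constructed below.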
graph_dic,ract_dic,pernode_dict,nodper_dic,cnum,active_actors=create_graph_dict(act_dict,pers_l,pers_dict,u,attribute_dict)
G, list_of_Graphs_final, Gagr, edgeList ,nmap ,mapping,k,n=synthetic_multi_level_dict(graph_dic,pernode_dict,nodper_dic,ract_dic,No_isolates=True)
conver_rel = 0
pos_dict={}
act_rel=[]
for k,v in graph_dic.items():
v.remove_nodes_from(nx.isolates(v))
print nx.info(v)
for nd in v.nodes():
act_rel.append(nd)
conver_rel += len(v.edges())
print
print 'Actors participating in all conversational relationships in all Macbeth Acts:'
print
for i in pernode_dict:
print i
print
print 'The total number of actors participating in all conversational relationships in all Macbeth Acts is', len(set(act_rel))
print
print 'The total number of conversational relationships (edges) among actors \
participating in all Macbeth Acts is', conver_rel
met_acts={'Act '+str(i):u for u,i in enumerate(sorted(graph_dic.keys())) }
rmet_acts={k:v for v,k in met_acts.items()}
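# Build the two-mode (bipartite) graph linking each Act to the actors appearing in it.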
act_scene_graph=create_two_mode_act_scene_graph(active_actors)
print nx.info(act_scene_graph)
pos_Scene=nx.spring_layout(act_scene_graph,scale=50)
# nx.draw_networkx(act_scene_graph,pos)
fig=plt.figure(figsize=(12,12))
# print 'aa'
# print act_scene_graph.nodes()
actors=[i for j in active_actors.values() for i in j]
nx.draw_networkx_nodes(act_scene_graph,pos_Scene,nodelist=list(set(actors)),node_color='r',node_size=500, node_shape='o',alpha=.3)
nx.draw_networkx_nodes(act_scene_graph,pos_Scene,nodelist=active_actors.keys(),node_color='b',node_size=700,node_shape='s',alpha =.2)
lso=nx.draw_networkx_edges(act_scene_graph,pos_Scene,edge_color='g',alpha=0.35)
# print "be"
lsbe=nx.draw_networkx_labels(act_scene_graph,pos_Scene,font_size=12)#17)
plt.title('Bipartite graph of actors participating in Macbeth acts',fontsize=22)
# print "be"
mon=plt.axis('off')
graph_dic_scene,ract_dic,pernode_dict,nodper_dic,cnum,active_actors_sc=create_graph_dict(scen_dict,pers_l,pers_dict,u,attribute_dict)
G, list_of_Graphs_final, Gagr, edgeList ,nmap ,mapping,k,n=synthetic_multi_level_dict(graph_dic_scene,pernode_dict,nodper_dic,ract_dic,No_isolates=True)
conver_rel = 0
pos_dict={}
counter=0
act_rel=[]
for k in sorted(graph_dic_scene):
v=graph_dic_scene[k]
# for k,v in graph_dic_scene.items():
# print k,v.nodes()
# print v.edges()
v.remove_nodes_from(nx.isolates(v))
# print v,nx.isolates(v)
print counter,
print nx.info(v)
for nd in v.nodes():
act_rel.append(nd)
print 'Number of actors appearing in Macbeth scene %s: %i' %(k,len(active_actors_sc[k]))
conver_rel += len(v.edges())
print
counter+=1
print 'Actors participating in all conversational relationships in all Macbeth Scenes:'
print
for i in pernode_dict:
print i
print
print 'The total number of actors participating in all conversational relationships in all Macbeth Scenes is', len(set(act_rel))
print
print 'The total number of conversational relationships (edges) among actors \
participating in all Macbeth Scenes is', conver_rel
act_scene_graph=create_two_mode_act_scene_graph(active_actors_sc)
print nx.info(act_scene_graph)
pos_Scene=nx.spring_layout(act_scene_graph,scale=50)
# nx.draw_networkx(act_scene_graph,pos)
fig=plt.figure(figsize=(12,12))
# print 'aa'
# print act_scene_graph.nodes()
actors=[i for j in active_actors_sc.values() for i in j]
nx.draw_networkx_nodes(act_scene_graph,pos_Scene,nodelist=list(set(actors)),node_color='r',node_size=500, node_shape='o',alpha=.3)
nx.draw_networkx_nodes(act_scene_graph,pos_Scene,nodelist=active_actors_sc.keys(),node_color='b',node_size=700,node_shape='s',alpha =.2)
lso=nx.draw_networkx_edges(act_scene_graph,pos_Scene,edge_color='g',alpha=0.35)
# print "be"
lsbe=nx.draw_networkx_labels(act_scene_graph,pos_Scene,font_size=12)#17)
plt.title('Bipartite graph of actors participating in Macbeth scenes',fontsize=22)
# print "be"
mon=plt.axis('off')
%autoreload 2
from testy2 import *
from syntheticThreeLayerGraph_time import synthetic_multi_level_dict,plot_graph_dict,plot_graph_k_n, plot_total_graph_with_weights
fileName = 'corpora/ShakespeareMacbethOrig_from_PG_mod.txt'
f=open(fileName,'r')
attributes_filename='corpora/Character Atrributes - Macbeth - 1.csv'
act_dict,u,pers_l,pers_dict,pact,lact,scen_dict,lscen,pscen=create_dict_of_acts(fileName)
fop=open('list_of_attributes_dic.dmpp')
list_of_attributes_dic=pickle.load(fop)
fop.close()
fop=open('attribute_dict.dmpp')
attribute_dict=pickle.load(fop)
fop.close()
fop=open('list_of_att.dmpp')
list_of_att=pickle.load(fop)
fop.close()
# attribute_dict={}
# af=open(attributes_filename,'r')
lis_pret=['Name','Gender','Social Status','Alliance','Drive','Count of speech characters']#,'All Attributes']
# pret=PrettyTable(lis_pret)
# pret.padding_width=1
graph_dic,ract_dic,pernode_dict,nodper_dic,cnum,active_actors=create_graph_dict(act_dict,pers_l,pers_dict,u,attribute_dict)
G, list_of_Graphs_final, Gagr, edgeList ,nmap ,mapping,k,n=synthetic_multi_level_dict(graph_dic,pernode_dict,nodper_dic,ract_dic,No_isolates=True)
conver_rel = 0
pos_dict={}
for k,v in graph_dic.items():
v.remove_nodes_from(nx.isolates(v))
conver_rel += len(v.edges())
# print cnum
# graph_dic_scene,ract_dicS,pernode_dictS,nodper_dicS,cnumS,active_actors_sc=create_graph_dict(scen_dict,pers_l,pers_dict,u,attribute_dict)
# G, list_of_Graphs_finalS, GagrS, edgeListS ,nmapS ,mappingS,k,n=synthetic_multi_level_dict(graph_dic_scene,pernode_dictS,nodper_dicS,ract_dicS,No_isolates=True)
# for k,v in graph_dic_scene.items():
# v.remove_nodes_from(nx.isolates(v))
# # conver_rel += len(v.edges())
# # print cnumS
# ract_dic.update(ract_dicS)
# for ll in cnumS:
# cnum.append(ll)
# # cnum.update(cnumS)
# uu=len(graph_dic.keys())
# scene_acts={'Scene '+str(i):u+uu for u,i in enumerate(sorted(graph_dic_scene.keys()))}
met_acts={'Act '+str(i):u for u,i in enumerate(sorted(graph_dic.keys())) }
# uu=len()
met_acts['(all Acts)'] = 1000
# met_acts.update(scene_acts)
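# Aggregate all Act layers into one weighted graph, stored under the key '(all Acts)'
# and addressed with the reserved index 1000 in met_acts.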
graph_dic['(all Acts)']=plot_total_graph_with_weights(graph_dic,nodper_dic)
# graph_dic.update(graph_dic_scene)
rmet_acts={k:v for v,k in met_acts.items()}
import pandas as pd
attr_dict_graph={5:'count_of_speech_characters',1:'gender',2:'social_status',3:'alliance',4:'drive'}
# print ww.result
index_graph=1000
if index_graph !=1000:
play_st="Macbeth's Act"
H = graph_dic[ract_dic[cnum[index_graph]]]
else:
# print 'a'
play_st="Macbeth Network"
H=graph_dic['(all Acts)']
act_st=rmet_acts[index_graph]
# select_attribute=select_attributes.result
# print select_attribute
# print play_st,act_st
try:
f=open('positions_of_Mc_Shake.dmp')
pos_dict=pickle.load(f)
pos =pos_dict[index_graph]
except:
pos=nx.spring_layout(H,scale=50)
pos_dict[index_graph]=pos
print "The number of actors in %s %s is %i" %(play_st,act_st, len(H.nodes()))
print "The number of conversational relationships in %s %s is %i" %(play_st,act_st, len(H.edges()))
print
print 'WEIGHTED CONVERSATIONAL RELATIONSHIPS:'
print
for edge in H.edges(data=True):
print '(%s, %s)' %(nodper_dic[edge[0]],nodper_dic[edge[1]]),edge[2]['weight']
print
sstt=play_st[:7]+' '+act_st+ ' '+'Network'
H.remove_nodes_from(nx.isolates(H))
posit=draw_network(H,sstt,pos=pos,with_edgewidth=True,withLabels=True,pernode_dict=pernode_dict,labfs=10,valpha=0.4,ealpha=0.4,labelfont=10)
draw_assor_attr_subplots(H,pos,sstt,attr_dict_graph,lis_pret,label_font=10,titlefont=20)
draw_centralities_subplots(H,pos,withLabels=True,labfs=15)
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
print 'Correspondence between IDs of nodes and names of actors:'
print
print labels
from chAs import draw_comms,modul_arity
import community as comm
# G = graph_dic[ract_dic[cnum[0]]]
H.remove_nodes_from(nx.isolates(H))
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
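# Louvain community detection (python-louvain): best_partition maps each node
# to a community id; the partition's modularity is reported further down.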
part=comm.best_partition(H)
print 'Number of communities of %s = %i' %(sstt, max(part.values())+1)
print 'Community partition of %s:' %(sstt)
parLis=[]
partdi={}
for i,k in part.items():
if k not in partdi:
partdi[k]=[nodper_dic[i]]
else:
partdi[k].append(nodper_dic[i])
for i,k in partdi.items():
parLis.append(k)
print parLis
print 'Community modularity of %s = %.4f' %(sstt, comm.modularity(part,H))
d=0.8
dd=0.8
c=1.2
cc=1.4
alpha=0.2
ealpha=.2
vcc={}
sstta="The %s %s Communities" %(max(part.values())+1,sstt)
draw_comms(H,H.nodes(),[],[],[] ,part,part,d,dd,c,cc,alpha,ealpha,nodper_dic,sstta,titlefont=20,labelfont=10)
import pandas as pd
attr_dict_graph={5:'count_of_speech_characters',1:'gender',2:'social_status',3:'alliance',4:'drive'}
# print ww.result
# index_graph=ww.result
index_graph=0
if index_graph !=1000:
play_st="Macbeth's Act"
H = graph_dic[ract_dic[cnum[index_graph]]]
else:
# print 'a'
play_st="Macbeth Network"
H=graph_dic['(all Acts)']
act_st=rmet_acts[index_graph]
# select_attribute=select_attributes.result
# print select_attribute
# print play_st,act_st
try:
f=open('positions_of_Mc_Shake_acts.dmp')
pos_dict=pickle.load(f)
pos =pos_dict[index_graph]
except:
pos=nx.spring_layout(H,scale=50)
pos_dict[index_graph]=pos
# print sorted(pos_dict.keys())
# print graph_dic.keys()
# for jjj in pos_dict:
# print jjj
# print sorted(pos_dict[jjj].keys())
# print sorted(H.nodes())
print "The number of actors in %s %s is %i" %(play_st,act_st, len(H.nodes()))
print "The number of conversational relationships in %s %s is %i" %(play_st,act_st, len(H.edges()))
print
print 'WEIGHTED CONVERSATIONAL RELATIONSHIPS:'
print
for edge in H.edges(data=True):
print '(%s, %s)' %(nodper_dic[edge[0]],nodper_dic[edge[1]]),edge[2]['weight']
print
sstt=play_st[:7]+' '+act_st+ ' '+'Network'
H.remove_nodes_from(nx.isolates(H))
posit=draw_network(H,sstt,pos=pos,with_edgewidth=True,withLabels=True,pernode_dict=pernode_dict,labfs=10,valpha=0.4,ealpha=0.4,labelfont=20)
draw_assor_attr_subplots(H,pos,sstt,attr_dict_graph,lis_pret,label_font=10,titlefont=20)
draw_centralities_subplots(H,pos,withLabels=True,labfs=15)
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
print 'Correspondence between IDs of nodes and names of actors:'
print
print labels
from chAs import draw_comms,modul_arity
import community as comm
# G = graph_dic[ract_dic[cnum[0]]]
H.remove_nodes_from(nx.isolates(H))
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
part=comm.best_partition(H)
print 'Number of communities of %s = %i' %(sstt, max(part.values())+1)
print 'Community partition of %s:' %(sstt)
parLis=[]
partdi={}
for i,k in part.items():
if k not in partdi:
partdi[k]=[nodper_dic[i]]
else:
partdi[k].append(nodper_dic[i])
for i,k in partdi.items():
parLis.append(k)
print parLis
print 'Community modularity of %s = %.4f' %(sstt, comm.modularity(part,H))
d=0.8
dd=0.8
c=1.2
cc=1.4
alpha=0.2
ealpha=.2
vcc={}
sstta="The %s %s Communities" %(max(part.values())+1,sstt)
draw_comms(H,H.nodes(),[],[],[] ,part,part,d,dd,c,cc,alpha,ealpha,nodper_dic,sstta,titlefont=20,labelfont=20)
import pandas as pd
attr_dict_graph={5:'count_of_speech_characters',1:'gender',2:'social_status',3:'alliance',4:'drive'}
# print ww.result
index_graph=1
if index_graph !=1000:
play_st="Macbeth's Act"
H = graph_dic[ract_dic[cnum[index_graph]]]
else:
# print 'a'
play_st="Macbeth Network"
H=graph_dic['(all Acts)']
act_st=rmet_acts[index_graph]
# select_attribute=select_attributes.result
# print select_attribute
# print play_st,act_st
try:
f=open('positions_of_Mc_Shake_acts.dmp')
pos_dict=pickle.load(f)
pos =pos_dict[index_graph]
except:
pos=nx.spring_layout(H,scale=50)
pos_dict[index_graph]=pos
print "The number of actors in %s %s is %i" %(play_st,act_st, len(H.nodes()))
print "The number of conversational relationships in %s %s is %i" %(play_st,act_st, len(H.edges()))
print
print 'WEIGHTED CONVERSATIONAL RELATIONSHIPS:'
print
for edge in H.edges(data=True):
print '(%s, %s)' %(nodper_dic[edge[0]],nodper_dic[edge[1]]),edge[2]['weight']
print
sstt=play_st[:7]+' '+act_st+ ' '+'Network'
H.remove_nodes_from(nx.isolates(H))
posit=draw_network(H,sstt,pos=pos,with_edgewidth=True,withLabels=True,pernode_dict=pernode_dict,labfs=10,valpha=0.4,ealpha=0.4,labelfont=20)
draw_assor_attr_subplots(H,pos,sstt,attr_dict_graph,lis_pret,label_font=10,titlefont=20)
draw_centralities_subplots(H,pos,withLabels=True,labfs=15)
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
print 'Correspondence between IDs of nodes and names of actors:'
print
print labels
from chAs import draw_comms,modul_arity
import community as comm
# G = graph_dic[ract_dic[cnum[0]]]
H.remove_nodes_from(nx.isolates(H))
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
part=comm.best_partition(H)
print 'Number of communities of %s = %i' %(sstt, max(part.values())+1)
print 'Community partition of %s:' %(sstt)
parLis=[]
partdi={}
for i,k in part.items():
if k not in partdi:
partdi[k]=[nodper_dic[i]]
else:
partdi[k].append(nodper_dic[i])
for i,k in partdi.items():
parLis.append(k)
print parLis
print 'Community modularity of %s = %.4f' %(sstt, comm.modularity(part,H))
d=0.8
dd=0.8
c=1.2
cc=1.4
alpha=0.2
ealpha=.2
vcc={}
sstta="The %s %s Communities" %(max(part.values())+1,sstt)
draw_comms(H,H.nodes(),[],[],[] ,part,part,d,dd,c,cc,alpha,ealpha,nodper_dic,sstta,titlefont=20,labelfont=20)
import pandas as pd
attr_dict_graph={5:'count_of_speech_characters',1:'gender',2:'social_status',3:'alliance',4:'drive'}
# print ww.result
index_graph=2
if index_graph !=1000:
play_st="Macbeth's Act"
H = graph_dic[ract_dic[cnum[index_graph]]]
else:
# print 'a'
play_st="Macbeth Network"
H=graph_dic['(all Acts)']
act_st=rmet_acts[index_graph]
# select_attribute=select_attributes.result
# print select_attribute
# print play_st,act_st
try:
f=open('positions_of_Mc_Shake_acts.dmp')
pos_dict=pickle.load(f)
pos =pos_dict[index_graph]
except:
pos=nx.spring_layout(H,scale=50)
pos_dict[index_graph]=pos
print "The number of actors in %s %s is %i" %(play_st,act_st, len(H.nodes()))
print "The number of conversational relationships in %s %s is %i" %(play_st,act_st, len(H.edges()))
print
print 'WEIGHTED CONVERSATIONAL RELATIONSHIPS:'
print
for edge in H.edges(data=True):
print '(%s, %s)' %(nodper_dic[edge[0]],nodper_dic[edge[1]]),edge[2]['weight']
print
sstt=play_st[:7]+' '+act_st+ ' '+'Network'
H.remove_nodes_from(nx.isolates(H))
posit=draw_network(H,sstt,pos=pos,with_edgewidth=True,withLabels=True,pernode_dict=pernode_dict,labfs=10,valpha=0.4,ealpha=0.4,labelfont=20)
draw_assor_attr_subplots(H,pos,sstt,attr_dict_graph,lis_pret,label_font=10,titlefont=20)
draw_centralities_subplots(H,pos,withLabels=True,labfs=15)
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
print 'Correspondence between IDs of nodes and names of actors:'
print
print labels
from chAs import draw_comms,modul_arity
import community as comm
# G = graph_dic[ract_dic[cnum[0]]]
H.remove_nodes_from(nx.isolates(H))
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
part=comm.best_partition(H)
print 'Number of communities of %s = %i' %(sstt, max(part.values())+1)
print 'Community partition of %s:' %(sstt)
parLis=[]
partdi={}
for i,k in part.items():
if k not in partdi:
partdi[k]=[nodper_dic[i]]
else:
partdi[k].append(nodper_dic[i])
for i,k in partdi.items():
parLis.append(k)
print parLis
print 'Community modularity of %s = %.4f' %(sstt, comm.modularity(part,H))
d=0.8
dd=0.8
c=1.2
cc=1.4
alpha=0.2
ealpha=.2
vcc={}
sstta="The %s %s Communities" %(max(part.values())+1,sstt)
draw_comms(H,H.nodes(),[],[],[] ,part,part,d,dd,c,cc,alpha,ealpha,nodper_dic,sstta,titlefont=20,labelfont=20)
import pandas as pd
attr_dict_graph={5:'count_of_speech_characters',1:'gender',2:'social_status',3:'alliance',4:'drive'}
# print ww.result
index_graph=3
if index_graph !=1000:
play_st="Macbeth's Act"
H = graph_dic[ract_dic[cnum[index_graph]]]
else:
# print 'a'
play_st="Macbeth Network"
H=graph_dic['(all Acts)']
act_st=rmet_acts[index_graph]
# select_attribute=select_attributes.result
# print select_attribute
# print play_st,act_st
try:
f=open('positions_of_Mc_Shake_acts.dmp')
pos_dict=pickle.load(f)
pos =pos_dict[index_graph]
except:
pos=nx.spring_layout(H,scale=50)
pos_dict[index_graph]=pos
print "The number of actors in %s %s is %i" %(play_st,act_st, len(H.nodes()))
print "The number of conversational relationships in %s %s is %i" %(play_st,act_st, len(H.edges()))
print
print 'WEIGHTED CONVERSATIONAL RELATIONSHIPS:'
print
for edge in H.edges(data=True):
print '(%s, %s)' %(nodper_dic[edge[0]],nodper_dic[edge[1]]),edge[2]['weight']
print
sstt=play_st[:7]+' '+act_st+ ' '+'Network'
H.remove_nodes_from(nx.isolates(H))
posit=draw_network(H,sstt,pos=pos,with_edgewidth=True,withLabels=True,pernode_dict=pernode_dict,labfs=10,valpha=0.4,ealpha=0.4,labelfont=20)
draw_assor_attr_subplots(H,pos,sstt,attr_dict_graph,lis_pret,label_font=10,titlefont=20)
draw_centralities_subplots(H,pos,withLabels=True,labfs=15)
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
print 'Correspondence between IDs of nodes and names of actors:'
print
print labels
from chAs import draw_comms,modul_arity
import community as comm
# G = graph_dic[ract_dic[cnum[0]]]
H.remove_nodes_from(nx.isolates(H))
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
part=comm.best_partition(H)
print 'Number of communities of %s = %i' %(sstt, max(part.values())+1)
print 'Community partition of %s:' %(sstt)
parLis=[]
partdi={}
for i,k in part.items():
if k not in partdi:
partdi[k]=[nodper_dic[i]]
else:
partdi[k].append(nodper_dic[i])
for i,k in partdi.items():
parLis.append(k)
print parLis
print 'Community modularity of %s = %.4f' %(sstt, comm.modularity(part,H))
d=0.8
dd=0.8
c=1.2
cc=1.4
alpha=0.2
ealpha=.2
vcc={}
sstta="The %s %s Communities" %(max(part.values())+1,sstt)
draw_comms(H,H.nodes(),[],[],[] ,part,part,d,dd,c,cc,alpha,ealpha,nodper_dic,sstta,titlefont=20,labelfont=20)
import pandas as pd
attr_dict_graph={5:'count_of_speech_characters',1:'gender',2:'social_status',3:'alliance',4:'drive'}
# print ww.result
index_graph=4
if index_graph !=1000:
play_st="Macbeth's Act"
H = graph_dic[ract_dic[cnum[index_graph]]]
else:
# print 'a'
play_st="Macbeth Network"
H=graph_dic['(all Acts)']
act_st=rmet_acts[index_graph]
# select_attribute=select_attributes.result
# print select_attribute
# print play_st,act_st
try:
f=open('positions_of_Mc_Shake_acts.dmp')
pos_dict=pickle.load(f)
pos =pos_dict[index_graph]
except:
pos=nx.spring_layout(H,scale=50)
pos_dict[index_graph]=pos
print "The number of actors in %s %s is %i" %(play_st,act_st, len(H.nodes()))
print "The number of conversational relationships in %s %s is %i" %(play_st,act_st, len(H.edges()))
print
print 'WEIGHTED CONVERSATIONAL RELATIONSHIPS:'
print
for edge in H.edges(data=True):
print '(%s, %s)' %(nodper_dic[edge[0]],nodper_dic[edge[1]]),edge[2]['weight']
print
sstt=play_st[:7]+' '+act_st+ ' '+'Network'
H.remove_nodes_from(nx.isolates(H))
posit=draw_network(H,sstt,pos=pos,with_edgewidth=True,withLabels=True,pernode_dict=pernode_dict,labfs=10,valpha=0.4,ealpha=0.4,labelfont=20)
draw_assor_attr_subplots(H,pos,sstt,attr_dict_graph,lis_pret,label_font=10,titlefont=20)
draw_centralities_subplots(H,pos,withLabels=True,labfs=15)
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
print 'Correspondence between IDs of nodes and names of actors:'
print
print labels
from chAs import draw_comms,modul_arity
import community as comm
# G = graph_dic[ract_dic[cnum[0]]]
H.remove_nodes_from(nx.isolates(H))
labels={i:v for v,i in pernode_dict.items() if i in H.nodes()}
part=comm.best_partition(H)
print 'Number of communities of %s = %i' %(sstt, max(part.values())+1)
print 'Community partition of %s:' %(sstt)
parLis=[]
partdi={}
for i,k in part.items():
if k not in partdi:
partdi[k]=[nodper_dic[i]]
else:
partdi[k].append(nodper_dic[i])
for i,k in partdi.items():
parLis.append(k)
print parLis
print 'Community modularity of %s = %.4f' %(sstt, comm.modularity(part,H))
d=0.8
dd=0.8
c=1.2
cc=1.4
alpha=0.2
ealpha=.2
vcc={}
sstta="The %s %s Communities" %(max(part.values())+1,sstt)
draw_comms(H,H.nodes(),[],[],[] ,part,part,d,dd,c,cc,alpha,ealpha,nodper_dic,sstta,titlefont=20,labelfont=20)
f=open('positions_of_Mc_Shake_acts.dmp','w')
# print pos_dict[0]
# print pos_dict[0].keys()
pickle.dump(pos_dict,f)
f.close()