-
Notifications
You must be signed in to change notification settings - Fork 1
/
connectome.py
93 lines (77 loc) · 3.57 KB
/
connectome.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import re
import string
from time import time
import json
from collections import Counter
from gensim import utils, corpora, models, similarities
from gensim.parsing import preprocessing
import networkx
from networkx.readwrite import json_graph
import matplotlib
matplotlib.use('Agg')
import pylab
# Select which text collection to process; the [-1] index makes the last
# entry ('som') the active one — change the index to switch collections.
prefix = ['sicp', 'dirac', 'dirac_sections', 'sicm', 'som'][-1]
# One '|'-separated record per section; [:-1] drops the empty string left
# by the file's trailing newline.
metadata = open('texts/'+prefix+'/metadata.txt','r').read().split('\n')[:-1]
# Record fields: wordcount|section-file-name|label.
# NOTE(review): field meanings inferred from later use (wordcount is cast
# to float for node sizing, book is opened as file paths in the commented
# prepare_corpus call, labels become node names) — confirm against the data.
wordcount, book, labels = zip(*[i.split('|') for i in metadata])
def prepare_corpus(documents):
    """Preprocess raw document strings into a persisted gensim corpus.

    Parameters
    ----------
    documents : list of str
        Raw text of each book section.

    Side effects
    ------------
    Writes ``texts/<prefix>/dictionary.dict`` (token id map) and
    ``texts/<prefix>/corpus.mm`` (bag-of-words corpus) using the
    module-level ``prefix``.
    """
    tic = time()
    # lower, strip tags, strip punctuation, strip multiple whitespaces,
    # strip numeric, remove stopwords, strip short, stem text
    texts = preprocessing.preprocess_documents(documents)
    # Filter out hapax legomena (tokens occurring exactly once across the
    # whole corpus).  A Counter gives one O(n) pass; the previous
    # all_tokens.count(word) per unique word was O(n^2).
    token_freq = Counter(token for text in texts for token in text)
    texts = [[word for word in text if token_freq[word] > 1]
             for text in texts]
    print(time() - tic)  # preprocessing wall-clock time, seconds
    dictionary = corpora.Dictionary(texts)
    dictionary.save('texts/'+prefix+'/dictionary.dict')
    raw_corpus = [dictionary.doc2bow(text) for text in texts]
    corpora.MmCorpus.serialize('texts/'+prefix+"/corpus.mm", raw_corpus)
# Derive per-section group ids (used for node coloring in the d3 view),
# with per-collection rules; some branches also clean up `labels`.
if (prefix == 'sicp') or (prefix == 'sicm'):
    #strip unreadable characters
    # NOTE(review): these byte values look like latin-1 fragments of UTF-8
    # sequences (\xc2\xa0 is a non-breaking space) — suggests the metadata
    # was decoded with the wrong codec upstream; confirm.
    labels = [re.sub('\xc2|\xa0|\xe2|\x80|\x94',' ',i) for i in labels]
    # remove section name from labels
    #labels = [ ((l[0].isdigit() and l[2].isdigit()) and l.split(' ')[0]) or l for l in labels]
    # Group = leading chapter digit when the label starts with one, else 0.
    groups = [(l[0].isdigit() and l[0]) or 0 for l in labels]
elif prefix == 'dirac':
    # Labels are 'chapter.section'; group by chapter number.
    groups = [l.split('.')[0] for l in labels]
elif prefix == 'dirac_sections':
    # Keep only labels that start with a digit, reduced to their chapter.
    # NOTE(review): this filter can shrink `labels`, desyncing it from
    # `wordcount`/`book` in the enumerate() calls further down — confirm
    # every metadata label actually starts with a digit.
    labels = [ l.split('.')[0] for l in labels if l[0].isdigit()]
    # metadata_extra.txt: one space-separated 'chapter group' pair per line.
    groupdic = {d[0]:d[1] for d in [i.split(' ') for i in open('texts/'+prefix+'/metadata_extra.txt','r').read().strip().split('\n')]}
    groups = [groupdic[i] for i in labels]
else:
    # Default ('som'): group by whatever precedes the first dot.
    groups = [l.split('.')[0] for l in labels]
#step 1 prepare corpus
# (one-time step: uncomment to rebuild dictionary.dict / corpus.mm)
#prepare_corpus([open('texts/'+section,'r').read() for section in book])
# Load the artifacts produced by a previous prepare_corpus() run.
dictionary = corpora.Dictionary.load('texts/'+prefix+'/dictionary.dict')
corpus = corpora.MmCorpus('texts/'+prefix+"/corpus.mm")
#step 2 create tf-idf model
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus] # convert vector space to tfidf space
#step 2.1 create LDA model
numTopics = 50
lda = models.ldamodel.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=numTopics)
topics = lda.show_topics(num_topics=numTopics)
#print topics
# Debug dump of each document's LDA topic distribution.
# NOTE(review): lda[text] yields (topic_id, probability) pairs, so
# dictionary[id] looks up a *topic* id as if it were a *word* id — the
# printed words are likely unrelated to the topics; confirm intent.
for text in corpus:
    for id,freq in lda[text]:
        print(dictionary[id], freq)
#step 3 create similarity matrix
# Pairwise cosine similarity of all documents in tf-idf space.
# '/tmp/tst' is gensim's shard-file prefix for the similarity index.
index = similarities.Similarity('/tmp/tst', corpus_tfidf.corpus, num_features=corpus.num_terms+1)
sims = index[corpus_tfidf]
#step 3.1
# Sparsify: zero every similarity below a per-collection percentile so
# the graph keeps only the strongest edges.
percentile = {'sicp': 90, 'sicm':95, 'dirac':60, 'dirac_sections':95, 'som': 98}[prefix]
sims[sims < pylab.percentile(sims, percentile)] = 0
#step 4 convert datatype to networkx Graph
print("converting similarity matrix to networkx Graph")
# Adjacency-matrix constructor: one node per book section, weighted edges
# from the thresholded similarity matrix.
sims = networkx.Graph(sims, node_list=list(range(len(book))))
# NOTE(review): the (G, name, values) argument order below is the
# networkx 1.x signature; networkx >= 2.0 expects
# set_node_attributes(G, values, name) — confirm the pinned version.
networkx.set_node_attributes(sims, 'name', {x:y for x,y in enumerate(labels)})
networkx.set_node_attributes(sims, 'group', {x:y for x,y in enumerate(groups)})
# Scale raw word counts down to a drawable node size.
wordcount_normalize = {'sicp':1000,'sicm':500}.get(prefix,1000)
networkx.set_node_attributes(sims, 'wordcount', {x:float(y)/wordcount_normalize for x,y in enumerate(wordcount)})
#step 5: dump json for visualization in d3.js
json_data = json_graph.node_link_data(sims)
json.dump(json_data, open('docs/json/'+prefix+'.json', 'w'), indent=4)
#networkx.draw(sims)
#savefig('graph.png')