forked from maximilianh/pubMunch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
pubFindGenes
executable file
·347 lines (289 loc) · 12.5 KB
/
pubFindGenes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
#!/usr/bin/env python
# load default python packages
import logging, optparse, sys, glob, gzip, gdbm, marshal, zlib, copy, struct, operator
from os.path import join, basename, isfile, dirname, abspath, splitext, isdir
from collections import defaultdict, Counter, namedtuple
# I highly recommend installing re2, it's way faster than re
# we fallback to re just in case
try:
import re2 as re
except ImportError:
import re
# add <scriptDir>/lib/ to package search path
sys.path.insert(0, join(dirname(abspath(__file__)), "lib"))
import pubGeneric, maxCommon, pubConf, maxbio, pubAlg, maxTables, pslMapBed, pubMap
import pubDnaFind
from pycbio.hgdata.Psl import Psl
import geneFinder
import tabfile
#import kent.psl, kent.pslTransMap
def readFile(inFname):
    """ Read one local text file and derive its PMID from the file name.

    The preferred file name format is <pmid>.<anyWords>.txt, so the PMID is
    the part of the basename before the first dot.
    Returns a tuple (pmid, text).
    """
    logging.debug("Reading file %s" % inFname)
    # use a context manager so the handle is closed even on read errors
    # (the original leaked the file object)
    with open(inFname) as fh:
        text = fh.read()
    # basename without the extension, then everything before the first dot
    pmid = splitext(basename(inFname))[0].split('.')[0]
    return pmid, text
# --- module-wide state, shared between findInLocalFile and findInLocalDir ---

# when True, per-document reports are only printed if a reference gene was missed
# (set from the -e/--onlyErrors command line option in main)
showOnlyErrors = False
# markerType -> set of PMIDs in which that marker type matched at least once;
# used for the final "DocCount" statistics
typePmids = defaultdict(set)
# all PMIDs that have been processed so far
pmids = set()
# lazily-loaded lookup tables, filled on the first findInLocalFile call
upData = None
entrez2sym = None
# hand-curated explanations for benchmark documents that are known to fail
manualAnnots = {
    9654228: "ocr error, 1 -> l",
    20472660: "needs OMIM data",
    20810036: "need OMIM dis",
    18350644: "this is not an article, but a list of mutations. Accessions are not Genbank",
    17438609: "this is not an article, but a list of mutations. Accessions are not Genbank",
    21122112: "would need OMIM to accept any number, no regex, probably impossible"
}
def findDnaIter(text):
    """ Find DNA sequences in text.

    Yields one (start, end, seq) tuple per nucleotide occurrence reported by
    pubDnaFind.nucleotideOccurrences. Occurrences with an empty sequence are
    skipped (per the original comment, this can only happen when the match is
    a bare restriction site).

    NOTE(review): the previous doctest called findDnaIter(text, "mydoc") and
    expected "docId|index"-keyed output; it predated the removal of the docId
    parameter and would fail under --test, so it was removed rather than
    guessed at.
    """
    for row in pubDnaFind.nucleotideOccurrences(text):
        # a restriction site alone carries no sequence -> nothing to report
        if row.seq == "":
            continue
        yield row.start, row.end, row.seq
def findInLocalFile(inFname, refGenes, foundGenes, paperMetaData, minScore, maxRank):
    """ Run the gene finder over one local text file and print a report.

    inFname       -- text file; its PMID is derived from the basename (see readFile)
    refGenes      -- gold standard, dict pmid -> list of entrez gene ids; an
                     empty dict means "no gold standard, process every file"
    foundGenes    -- set of (pmid, geneId) pairs; found genes are ADDED here
                     (mutated in place, shared across calls by findInLocalDir)
    paperMetaData -- dict pmid -> metadata row, see parsePaperData
    minScore      -- count a gene as "found" only if score >= minScore (None: no limit)
    maxRank       -- count only the top maxRank genes as "found" (None: no limit)

    Side effects: updates the module-level sets pmids and typePmids, and
    lazily loads the module-level upData/entrez2sym tables on first call.
    """
    pmid, text = readFile(inFname)
    # skip near-empty documents
    if len(text)<30:
        logging.info("empty text file")
        return
    wordCount = len(text.split())
    # when a gold standard is given, only look at documents it annotates
    if pmid not in refGenes and not len(refGenes)==0:
        logging.info("no annotations for pmid %s" % pmid)
        return
    # sequence search results are cached on disk, next to the input files
    seqCacheFname = join(dirname(inFname), "seqCache.gdbm")
    logging.debug("Opening seqCache %s" % seqCacheFname)
    seqCache = gdbm.open(seqCacheFname, "w")
    #genes = geneFinder.findGenesResolveByType(text, pmid=pmid, seqCache=seqCache)
    geneScores, geneSupp = geneFinder.rankGenes(text, pmid, seqCache)

    # lazy-load the uniprot and entrez lookup tables on the first call
    mutDataDir = pubConf.geneDataDir
    global upData, entrez2sym
    if upData==None:
        fname = join(mutDataDir, "uniprot.tab.marshal")
        upData = marshal.load(open(fname))
        fname = join(mutDataDir, "entrez.9606.tab.marshal")
        entrezRefseq = marshal.load(open(fname))
        entrez2sym = entrezRefseq["entrez2sym"]

    pmids.add(pmid)
    allGenesFound = set()
    foundLines = []
    rank = 0
    topGenes = []
    for geneId, score in geneScores:
        rank += 1
        mentionList = geneSupp[geneId]
        for markerType, idStr, locs in mentionList:
            logging.debug("Found matches for type %s, gene %s, marker %s" % (markerType, geneId, idStr))
            logging.debug("Text: %s" % [text[start:end] for start, end in locs])
            desc = "notAnnotated"
            if geneId==None:
                desc = "notValidIdentifier"
            else:
                # only count a gene as found if it's an unambiguous symbol match with count > 3
                # or something better than a symbol
                #support = len(locs)
                #minSupport = wordCount / 1200
                #if minScore!=None:
                #minSupport = reqMinSupp
                # old rule, for invitae
                #mType=="symbol" and (len(locs)>(wordCount/1200)) or \
                #mType=="symbolMaybe" and (len(locs)>10):
                # accept anythin with more than x mentions
                #if mType not in ["symbolMaybe", "symbol"] or \
                #mType in ["symbol", "symbolMaybe"] and support > minSupport:
                if (minScore==None or score >= minScore) and \
                    (maxRank==None or rank <= maxRank):
                    # consider this gene "found" only if conditions are fullfilled
                    allGenesFound.add(geneId)
                    predPair = (pmid, geneId)
                    foundGenes.add( predPair )
                # mark whether the gold standard lists this gene for this paper
                if pmid in refGenes and geneId in refGenes[pmid]:
                    desc = "annotated"
            logging.debug("found genes: %s" % foundGenes)
            # make snippets
            snips = []
            hits = []
            for loc in locs:
                start, end = loc
                snip = pubAlg.getSnippet(text, start, end, maxContext=20)
                snips.append(snip)
                logging.debug("Snip: %s" % snip)
                hitText = text[start:end]
                if hitText not in hits:
                    hits.append(hitText)
            # only the first snippet is shown, flattened to a single line
            oneSnip = ""
            if len(snips)>0:
                oneSnip = snips[0]
                oneSnip = oneSnip.replace("\n", " ")
            hits = [h.replace("\n"," ") for h in hits]
            row = [ pmid, geneFinder.entrezSymbol(geneId), str(score), markerType, str(idStr), \
                ",".join(hits), str(len(locs)), desc, oneSnip]
            foundLines.append("\t".join(row))
            if geneId!=None:
                typePmids[markerType].add(pmid)

    # compare against the gold standard and print the per-document report
    annotLines = []
    if pmid in refGenes or len(refGenes)==0:
        entrezList = set(refGenes.get(pmid, []))
        allFound = True
        for entrez in entrezList:
            entrez = int(entrez)
            if entrez not in entrez2sym:
                logging.error("%s in gold standard file is not a valid gene id (with a chrom location)" % entrez)
                continue
            sym = entrez2sym[entrez]
            if entrez in allGenesFound:
                desc = "found"
            else:
                desc = "notFound"
                allFound = False
            annotLines.append( "reference annotation: entrezID %d symbol %s %s "% (entrez, sym, desc))
        # with -e/--onlyErrors, reports are only shown for imperfect documents
        if (not showOnlyErrors) or (not allFound):
            print "PMID",pmid
            meta = paperMetaData.get(str(pmid), None)
            year = "unknown"
            if meta!=None:
                year = meta.year
            print "Size: %d words, publYear %s" % (wordCount, year)
            # word frequencies help to spot broken PDF-to-text conversions
            wc = Counter(text.split())
            wcParts = []
            topWords = []
            for w, count in wc.most_common(5):
                wcParts.append("%s=%d" % (w, count))
                topWords.append(w)
            print "most common words: "+",".join(wcParts)
            # readable English text almost always has "the" among its top words
            if "the" not in topWords:
                print "** %s PDF corrupted?" % pmid
            if "Novel human pathological mutations" in text:
                print "** %s Not a real article" % pmid
            # hand-curated notes for known-problematic benchmark documents
            if pmid.isdigit() and int(pmid) in manualAnnots:
                print "**", manualAnnots.get(int(pmid), "")
            print "\n".join(annotLines)
            print "\n".join(foundLines)
            print "-----"
    else:
        print "PMID %s NOT FOUND" % str(pmid)
def parsePaperData(inDir):
    """ Load per-paper metadata for a document directory, keyed on PMID.

    Looks for textData.tab in inDir, falling back to ../articleMeta.tab.
    Returns {} when neither file exists, otherwise a dict pmid -> row.
    """
    metaFname = join(inDir, "textData.tab")
    if not isfile(metaFname):
        # fall back to the article metadata one directory up
        metaFname = join(inDir, "..", "articleMeta.tab")
    if not isfile(metaFname):
        return {}
    return dict((row.pmid, row) for row in maxCommon.iterTsvRows(metaFname))
def findInLocalDir(inDir, refGenes, minScore, maxRank):
    """ Run findInLocalFile over all *.txt files in inDir, then print
    precision/recall statistics against the gold standard.

    refGenes -- dict pmid -> list of entrez gene ids, see parseRefs
    minScore, maxRank -- benchmark thresholds, passed through to findInLocalFile

    NOTE(review): raises ZeroDivisionError if refPairs or foundGenes ends up
    empty (recall/precision division below).
    """
    fnames = glob.glob(join(inDir, "*.txt"))
    #fnames = glob.glob(join(inDir, "100*.txt"))
    pm = maxCommon.ProgressMeter(len(fnames))
    # (pmid, geneId) pairs, filled in-place by findInLocalFile
    foundGenes = set()
    paperData = parsePaperData(inDir)
    for fname in fnames:
        #logging.info(fname)
        findInLocalFile(fname, refGenes, foundGenes, paperData, minScore, maxRank)
        pm.taskCompleted()

    # cleanup the refgenes dict, remove pmids without any annotations
    newRefGenes = {}
    for pmid, genes in refGenes.iteritems():
        if len(genes)!=0:
            newRefGenes[pmid]=genes
    refGenes = newRefGenes

    print "----"
    global pmids
    print "total PMIDs searched: %d" % len(pmids)
    #refGenesDict = dict([(pmid, refGenes[pmid]) for pmid in pmids])
    # restrict the benchmark to documents that carry gold standard annotations
    pmidsWithRefs = pmids.intersection(refGenes)
    print "total PMIDs searched with gene annotations: %d" % len(pmidsWithRefs)
    #print pmidsWithRefs
    # the set of (pmid, gene) pairs that should have been found
    refPairs = set()
    for pmid in pmidsWithRefs:
        genes = refGenes[pmid]
        for gene in genes:
            refPairs.add( (str(pmid), gene) )
    print "total (PMID, gene)-pairs to find (searched and with ref annotations): %d" % len(refPairs)
    #print refPairs
    print "total (PMID, gene)-pairs found: %d" % len(foundGenes)
    #print foundGenes
    matchPairs = foundGenes.intersection(refPairs)
    #print matchPairs, refPairs
    print "total (PMIDs, gene) matches: %d" % len(matchPairs)
    print "recall: %f" % (float(len(matchPairs))/len(refPairs))
    print "precision: %f" % (float(len(matchPairs))/len(foundGenes))
    print "total PMIDs with at least one gene match: %d" % len(set([x for x,y in matchPairs]))

    # output how often identifiers were found
    # (note: rebinds the module-level typePmids to a list of pairs, and the
    # loop variable below shadows the module-level pmids set)
    global typePmids
    typePmids = typePmids.items()
    typeCounts = []
    for mType, pmids in typePmids:
        typeCounts.append ((mType, len(pmids)))
    typeCounts.sort(key=operator.itemgetter(1), reverse=True)
    for mType, pmidCount in typeCounts:
        print "DocCount %s %d" % (mType, pmidCount)
def parseRefs(fname):
    """ Parse the gold standard file, format <pmid>tab<entrezGeneId>.

    Lines starting with "pmid" or "#" are treated as headers/comments and
    skipped. Returns a dict pmid (string) -> list of entrez gene ids (ints).
    """
    data = defaultdict(list)
    #for row in maxCommon.iterTsvRows(fname, headers=["pmid", "gene"]):
    # context manager closes the handle (the original leaked the file object)
    with open(fname) as fh:
        for line in fh:
            if line.startswith("pmid") or line.startswith("#"):
                # just skip header
                continue
            fields = line.strip().split("\t")
            data[fields[0]].append(int(fields[1]))
    return data
def main(args, options):
    """ Dispatch gene finding to a single file or a whole directory.

    args[0] is the input text file or directory; the optional args[1] is a
    gold standard file of <pmid>tab<entrezGene> lines (see parseRefs).
    """
    # --test: run the doctests and stop
    if options.test:
        import doctest
        doctest.testmod()
        #runTests()
        sys.exit(0)

    if options.debug:
        pubConf.debug = True

    global showOnlyErrors
    showOnlyErrors = options.onlyErrors

    minScore = options.minScore
    maxRank = options.maxRank
    pubGeneric.setupLogging("", options)

    inFname = args[0]
    # an optional second argument names the gold standard annotation file
    refGenes = parseRefs(args[1]) if len(args) > 1 else {}

    # -s/--noSeqs switches off sequence-based gene resolution
    exclMarkerTypes = ["dnaSeq"] if options.noSeqs else []
    geneFinder.initData(exclMarkerTypes=exclMarkerTypes)

    if isfile(inFname):
        # single document: its metadata file lives in the same directory
        paperMetaData = parsePaperData(dirname(inFname))
        findInLocalFile(inFname, refGenes, set(), paperMetaData, minScore, maxRank)
    elif isdir(inFname):
        findInLocalDir(inFname, refGenes, minScore, maxRank)
    else:
        # args[0] is neither an existing file nor a directory
        assert False
    #logging.info("Wrote output to %s" % outFname)
# === COMMAND LINE INTERFACE, OPTIONS AND HELP ===
# build the option parser; option destinations map 1:1 onto the attributes
# read in main() (debug, verbose, test, onlyErrors, noSeqs, minScore, maxRank)
parser = optparse.OptionParser("""usage: %prog [options] <inputFileOrDir> [goldStandardFile] - use all possible means to resolve genes
inputFile is a text file or a directory full of files with the extension .txt
The preferred filename format is <pmid>.<anyWords>.txt
goldStandardFile is a tab-separated file in the format <pmid>tab<entrezGene>
This file is optional. If it is present, the program will output precision, recall and F1.
""")
parser.add_option("-d", "--debug", dest="debug", action="store_true", help="show debug messages")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="show more debug messages")
parser.add_option("-t", "--test", dest="test", action="store_true", help="run tests")
parser.add_option("-e", "--onlyErrors", dest="onlyErrors", action="store_true", help="print output only if a gene has been missed")
parser.add_option("-s", "--noSeqs", dest="noSeqs", action="store_true", help="Do not use sequences found in papers to resolve genes")
parser.add_option("-m", "--minScore", dest="minScore", action="store", type="int", help="minimum score of genes for benchmark (but the output shows all)")
parser.add_option("-r", "--maxRank", dest="maxRank", action="store", type="int", help="only benchmark on top x genes (but the output shows)")
(options, args) = parser.parse_args()

# without positional arguments (and not in test mode) there is nothing to do:
# show the help text and exit with an error code
if args==[] and not options.test:
    parser.print_help()
    exit(1)
pubGeneric.setupLogging(__file__, options)
main(args, options)