# setup1.py
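"""Order pairs of articles in time with a decision-tree classifier.

A rough outline of the stages below (the A/B/C labels match the comments in the code):
  A. getArticles         -- pull date-bearing sentences for each article via Open IE
                            tags and Wikipedia content.
  B. generateDataPoints  -- build labelled article pairs (label 1 if the first
                            article's year precedes the second's).
  C. train / test        -- fit a sklearn DecisionTreeClassifier on the pair features
                            and collect per-pair predictions and probabilities.
"""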
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import app.parser.getData as importArticles
import app.parser.articleRetrieval.getArticles as getContent
import app.parser.sentences as sent
import app.parser.getChunks as gc
import app.analytics.tag as tag
import app.parser.articleRetrieval.wikipediaParse as wp
import app.analytics.features as fe
import app.analytics.functions.hasDate as hd
from sklearn import tree, feature_extraction
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import datetime
trainArticles = open('singleSets.txt', 'r').readlines()  # = importArticles.getData('train')
testArticles = open('singleSetTest.txt', 'r').readlines()  # = importArticles.getData('test')
doubles = open('doubleSets.txt', 'r').readlines()
doubleSets = eval(doubles[0])
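# Note: singleSets.txt / singleSetTest.txt / doubleSets.txt are read as plain text and
# eval'd (here and in generateDataPoints), so each entry is assumed to be a Python
# literal. A hypothetical single-set line, matching the dicts built in getArticles:
#   {'title': 'Some Article', 'sentences': ['It was founded in 1969.'], 'year': 1969}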
print len(trainArticles)
print len(testArticles)
listOfYears = []
clf = tree.DecisionTreeClassifier()
probs = []
titles = []
#A
def getArticles(articleList):
    singleSets = []
    for article in articleList:
        try:
            chunks = gc.getChunks(article[1])
            tags = tag.getTags(article[1], chunks)
            if tags == []:
                continue  # nothing was extracted for this article, move on to the next one
            # The Stanford Open IE tags
            subject = tags['subject']
            relation = tags['relation']
            objects = tags['object']
            objects = objects.split()
            content = wp.getArticle(subject)
            rawSentences = sent.getSentences(content)
            # keep only the sentences that contain a date
            sentences = []
            for sentence in rawSentences:
                if hd.hasDate(sentence) != []:
                    sentences.append(sentence)
            listOfYears.append(article[0])
            SS = {'title': article[1], 'sentences': sentences, 'year': article[0]}
            singleSets.append(SS)
        except:
            pass
    return singleSets
#B
def generateDataPoints(singleSets):
    doubleSets = []
    for i in range(len(singleSets)):
        print str(i) + '/' + str(len(singleSets))
        for j in range(i + 1, len(singleSets)):
            I = eval(singleSets[i])
            J = eval(singleSets[j])
            # label is 1 if the first article's year precedes the second's
            if I['year'] < J['year']:
                b = 1
            else:
                b = 0
            doubleSets.append({'title1': I['title'], 'sentences1': I['sentences'],
                               'title2': J['title'], 'sentences2': J['sentences'],
                               'year': b, 'vocab': set(I['sentences'] + J['sentences'])})
    # cache the generated pairs to disk
    with open('doubleSets.txt', 'w') as cache:
        cache.write(str(doubleSets))
    print doubleSets
    return doubleSets
#C
def train(doubleSets):
    bools = []
    features = []
    for item in doubleSets:
        bools.append(item['year'])
        vec = fe.get(item['sentences1'], item['sentences2'])
        features.append(vec)
    print "Training The Classifier."
    clf.fit(features, bools)


def test(doubleSets):
    bools = []
    features = []
    correct = 0
    incorrect = 0
    for item in doubleSets:
        bools.append(item['year'])
        vec = fe.get(item['sentences1'], item['sentences2'])
        titles.append([item['title1'], item['title2']])
        features.append(vec)
    for feature in range(len(features)):
        predict = clf.predict(np.array([features[feature]]))
        prob = clf.predict_proba(np.array([features[feature]]))
        probs.append([predict, prob, bools[feature]])


print "beginning training"
train(doubleSets)
print "Training Complete. Now For Testing"
test(generateDataPoints(testArticles))
print zip(titles, probs)
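# The script targets Python 2 (print statements, reload(sys)) and assumes the three
# data files above sit in the working directory; it is presumably run directly,
# e.g. `python setup1.py`.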