-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: entityEtra.py
More file actions
executable file
·157 lines (128 loc) · 5.46 KB
/
entityEtra.py
File metadata and controls
executable file
·157 lines (128 loc) · 5.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
import os
import shutil
import time
import itertools
import re
import nltk
from nltk import ne_chunk, pos_tag, word_tokenize
from nltk.tree import Tree
import requests
# Anchor the working directory to this script's location so relative
# paths (the input triples file, 'output.nq') resolve predictably.
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
# RDF syntax namespace; used as the predicate prefix for the generated
# "<...isA>" quads in readFile().
relationToUse = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
# Module-level accumulators.  NOTE(review): none of these four globals is
# read or written anywhere in this chunk — presumably used by code outside
# this view, or leftovers; confirm before removing.
mainSim, hashOut = 0, 0
subToUse, objToUse = '', ''
def get_continuous_chunks(text):
    """Extract contiguous named-entity strings from *text* using NLTK.

    The text is tokenized, POS-tagged and NE-chunked; tokens belonging to
    adjacent entity subtrees are merged into single space-joined strings.

    Args:
        text: Plain-text string to scan for named entities.

    Returns:
        List of unique named-entity strings, in first-seen order.  Empty
        list when no entities are found.
    """
    chunked = ne_chunk(pos_tag(word_tokenize(text)))
    continuous_chunk = []
    current_chunk = []
    for node in chunked:
        if type(node) == Tree:
            # Inside an entity subtree: accumulate its surface tokens.
            current_chunk.append(" ".join(token for token, pos in node.leaves()))
        elif current_chunk:
            # An entity run just ended: flush it, deduplicated.
            named_entity = " ".join(current_chunk)
            if named_entity not in continuous_chunk:
                continuous_chunk.append(named_entity)
            current_chunk = []
    # BUG FIX: flush a trailing entity when the sequence ends while still
    # inside a chunk — the original version silently dropped the final
    # named entity of the text.
    if current_chunk:
        named_entity = " ".join(current_chunk)
        if named_entity not in continuous_chunk:
            continuous_chunk.append(named_entity)
    return continuous_chunk
def readFile(fileName, context):
    """Convert a triples file into context-tagged quads in 'output.nq'.

    Reads *fileName* line by line, re-emits every statement with the graph
    *context* appended as a fourth element, and — for URIs whose last path
    segment contains multiple NLTK-detected named entities — writes an
    extra "<...isA>" quad linking the original URI to a comma-joined
    entity URI.  Lines are processed in alternating subject/object pairs
    via a parity toggle.

    Args:
        fileName: Path to the input triples file, resolved relative to
            this script's directory.
        context: Graph/context URI appended to every emitted statement.

    Returns:
        The contents of 'output.nq' post-processed for display (newlines
        replaced with '</br>'), or None when either file fails to open.
    """
    start_time = time.time()  # NOTE(review): never read again — leftover timing code?
    # Re-anchor to the script directory so 'output.nq' lands next to it.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    os.chdir(dir_path)
    try:
        reader = open(fileName, "r")
        writer = open('output.nq', 'w+')
        # NOTE(review): reader is never closed, and neither handle uses a
        # `with` block — resource leak on every call.
    except Exception as e:
        print(e)
    else:
        # Alternates True/False per statement line: the input apparently
        # interleaves "subject" lines and "object" lines — confirm against
        # the producer of these files.
        parity = itertools.cycle([True, False])
        for line in reader:
            # Normalize URL-encoded separators to plain commas.
            line = line.replace('%20', ',').replace('%2C', ',')
            if line.startswith("_:"):
                # Blank-node line: append the context and move on without
                # consuming a parity slot.
                writer.write('{} <{}> .'.format(line.strip().rstrip(' .'), context))
                writer.write('\n')
                continue
            if line.isspace():
                continue
            if next(parity):
                # Odd line of the pair: treated as the "subject" line.
                line1 = line
                writer.write('{} <{}> .'.format(line1.strip().rstrip(' .'), context))
                writer.write('\n')
                # Pull every <...> URI out of the raw line.
                line1 = re.findall('<([^>]*)>', line)
                if len(line1) == 3:
                    sub_line1, rel_line1, obj_line1 = line1[0], line1[1], line1[2]
                elif len(line1) == 2:
                    # Two URIs means the object is a quoted literal.
                    sub_line1, rel_line1 = line1[0], line1[1]
                    obj_line1 = re.findall('"([^"]*)"', line)[0]
                else:
                    # NOTE(review): if neither pattern matches, sub_line1
                    # is unbound below — potential NameError.
                    pass
                finalSub = ''
                sub1 = sub_line1  # keep the full URI for quad output
                # Last path segment, upper-cased — presumably so NLTK tags
                # it as a proper noun; confirm this is intentional.
                sub_line1 = sub_line1.split('/')[-1].upper()
                try:
                    extractedSub = get_continuous_chunks(sub_line1.upper())
                    print("sub ext", extractedSub)
                    extractedSubLength = len(extractedSub)
                except Exception as e:
                    print(e)
                else:
                    # print("NLTK works perfectly")
                    if extractedSubLength == 0:
                        pass
                    elif extractedSubLength == 1:
                        finalSub = extractedSub[0]
                    else:
                        # Multiple entities in one path segment: emit an
                        # extra isA quad pointing at a comma-joined URI.
                        writer.write(
                            '<{}> <{}isA> <{}> <{}> .'.format(sub1, relationToUse,
                                                              '/'.join(sub1.split('/')[:-1]) + '/' + ','.join(
                                                                  extractedSub),
                                                              context))
                        writer.write('\n')
                        # writer.write('{}'.format(extractedSub))
            else:
                # Even line of the pair: treated as the "object" line —
                # same parsing as above, mirrored into *_line2 names.
                line2 = line
                writer.write('{} <{}> .'.format(line2.strip().rstrip(' .'), context))
                writer.write('\n')
                line2 = re.findall('<([^>]*)>', line)
                if len(line2) == 3:
                    sub_line2, rel_line2, obj_line2 = line2[0], line2[1], line2[2]
                elif len(line2) == 2:
                    sub_line2, rel_line2 = line2[0], line2[1]
                    obj_line2 = re.findall('"([^"]*)"', line)[0]
                else:
                    # NOTE(review): same unbound-variable risk as above.
                    pass
                # writer.write("Both line: {}{}".format(line1, line2))
                finalObj = ''
                sub2 = sub_line2
                sub_line2 = sub_line2.split('/')[-1].upper()
                try:
                    extractedObj = get_continuous_chunks(sub_line2.upper())
                    print("obj ext", extractedObj)
                    extractedObjLength = len(extractedObj)
                except Exception as e:
                    print(e)
                else:
                    # print("NLTK works perfectly")
                    if extractedObjLength == 0:
                        pass
                    elif extractedObjLength == 1:
                        finalObj = extractedObj[0]
                    else:
                        writer.write('<{}> <{}isA> <{}> <{}> .'.format(sub2, relationToUse,
                                                                       '/'.join(sub2.split('/')[:-1]) + '/' + ','.join(
                                                                           extractedObj), context))
                        writer.write('\n')
                # Lower-case both sides — set-up for a similarity
                # comparison.  NOTE(review): these values are computed but
                # never consumed within this chunk; presumably used by
                # code outside this view, or dead — confirm.
                sub_line1, sub_line2 = sub_line1.lower(), sub_line2.lower()
                # Calculating the similarity between two normal words that have not been changed
                finalSub, finalObj = finalSub.lower(), finalObj.lower()
                extractedSub = [i.lower() for i in extractedSub]
                extractedObj = [i.lower() for i in extractedObj]
        writer.flush()
        writer.close()
        # Re-read the finished file and format it for display.
        # NOTE(review): .replace('<', '<') / .replace('>', '>') are no-ops
        # as written — almost certainly '&lt;'/'&gt;' HTML-escaping that
        # was destroyed when this file was extracted; confirm against the
        # upstream source before relying on this output.
        readIn = open('output.nq')
        data = readIn.read().replace('<', '<').replace('>', '>').replace('\n', '</br>')
        readIn.close()
        return data