# from stanfordcorenlp import StanfordCoreNLP
# nlp = StanfordCoreNLP(r'/Users/yuronan/develop/code/python/stanford-corenlp-4.3.2', lang='en')
# f = open('./data/senticnet_word.txt', 'w+')
# count = 0
# for line in set(open('data/origin/senticnet5.txt')):
#     line_list = line.strip().split('\t')
#     if ' ' in line or '_' in line:
#         continue
#     count += 1
#     if nlp.pos_tag(line_list[0]):
#         f.write(nlp.pos_tag(line_list[0])[0][0] + ',' + nlp.pos_tag(line)[0][1] + ',' + line_list[len(line_list) - 3][:3] + ',' +'s\n')
# print(count)
# f.close()
def count_label_distribution(path='data/sentic_hownet.txt'):
    """Count sentiment-label occurrences in a comma-separated lexicon file.

    Each line is expected to look like ``word,POS_TAG,label`` where the
    third field is ``'pos'`` or ``'neg'``.  Lines with any other label are
    printed for inspection.

    Args:
        path: File to read (defaults to the project's sentic/HowNet merge).

    Returns:
        Tuple ``(pos_count, neg_count, total_count)``.

    Raises:
        OSError: If *path* cannot be opened.
        IndexError: If a line has fewer than three comma-separated fields.
    """
    pos_count = 0
    neg_count = 0
    count = 0
    # `with` guarantees the file handle is closed even if a line is malformed.
    with open(path) as f:
        for line in f:
            line_list = line.strip().split(',')
            count += 1
            if line_list[2] == 'pos':
                pos_count += 1
            elif line_list[2] == 'neg':
                neg_count += 1
            else:
                # Unexpected label: surface the raw fields for debugging.
                print(line_list)
    print(pos_count)
    print(neg_count)
    print(count)
    return pos_count, neg_count, count


if __name__ == '__main__':
    count_label_distribution()