#!/usr/bin/env python
# coding=utf-8

from nlp.CoreNLPClient import  NERTag
import json
from DBUtils import GetCveList, GetSummaryByCve
import sys
from celery import Celery

# Celery application instance for this worker module; broker/result-backend
# settings are pulled from the adjacent `celeryconfig` module at import time.
app = Celery('tasks')
app.config_from_object('celeryconfig')

@app.task
def NamedEntityExtract(text):
    """Extract named-entity phrases from *text* via the CoreNLP NER tagger.

    Only the first sentence of the tagger output is examined.  Consecutive
    tokens sharing the same NER tag are grouped into one phrase; tokens
    tagged 'O', 'NUMBER', 'MONEY', or 'CVE' are skipped.

    Returns a tuple ``(phrases, text)`` where *phrases* is a list of
    word-lists (the tag itself is not kept).
    """
    parsed = NERTag(text.encode("utf-8"))
    tokens = parsed["sentences"][0]["tokens"]

    uninteresting = ('O', 'NUMBER', 'MONEY', 'CVE')
    phrases = []
    idx = 0
    total = len(tokens)
    while idx < total:
        current_tag = tokens[idx]['ner']
        if current_tag in uninteresting:
            idx += 1
            continue
        # Collect the run of consecutive tokens carrying the same tag.
        run = []
        while idx < total and tokens[idx]['ner'] == current_tag:
            run.append(tokens[idx]['word'])
            idx += 1
        phrases.append(run)
    return phrases, text


def main():
    """Run NER tagging over every CVE summary in the database.

    For each CVE returned by ``GetCveList()``, fetch its summary, tag it
    with CoreNLP, and print one ``phrase<TAB>tag`` line per run of
    consecutive tokens sharing a non-trivial NER tag ('O', 'NUMBER',
    'MONEY', and 'CVE' runs are skipped).  Tagging failures are logged to
    stderr with a traceback and processing continues with the next CVE.
    """
    cnt = 0
    for cve, in GetCveList():
        cnt += 1
        summary = GetSummaryByCve(cve)
        try:
            result = NERTag(summary.encode("utf-8"))
            # Only the first sentence of the tagger output is examined.
            tokens = result["sentences"][0]["tokens"]

            i = 0
            while i < len(tokens):
                tag = tokens[i]['ner']
                if tag not in ('O', 'NUMBER', 'MONEY', 'CVE'):
                    words = []
                    # Gather the run of consecutive tokens with this tag.
                    while i < len(tokens) and tokens[i]['ner'] == tag:
                        words.append(tokens[i]['word'])
                        i += 1
                    # Parenthesized single argument: valid in Python 2 and 3.
                    print("{}\t{}".format(','.join(words), tag))
                else:
                    i += 1
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and make the loop uninterruptible.
        except Exception:
            import traceback
            traceback.print_exc()
            sys.stderr.write(summary + "\n")

# Script entry point: run the batch NER extraction when executed directly.
if __name__ == "__main__":
    main()

