import pymysql
import json
import csv
import time
from paddlenlp import Taskflow
import re
import difflib

# --- Configuration -----------------------------------------------------------
# Target organization account; drives the table name and the output file name.
organization_username = 'USTradeRep'
DBNAME = "twitter"
USER = "root"
PASSWORD = "root"
table_name = f"{organization_username}_profile"

# CSV file that confirmed accounts are appended to.
filename_output = f"./data/{organization_username}_NER1.csv"

# Shared module-level MySQL connection used by the helpers below.
db = pymysql.connect(
    host='localhost',
    user=USER,
    password=PASSWORD,
    database=DBNAME,
    charset='utf8',
)


def get_cursor_mysql():
    """Return a fresh cursor from the shared module-level connection."""
    return db.cursor()


def read_from_mysql():
    """Fetch candidate profile rows from the `<org>_profile` table.

    Returns:
        A list of mutable rows, each
        [id, name, username, description, urls, mentions], for every profile
        whose description is longer than 6 characters.
    """
    cursor = get_cursor_mysql()
    try:
        # table_name is a module constant, not user input, so string
        # concatenation here is not an injection risk.
        sql = "select id,name,username,description,urls,mentions from " + table_name + " where char_length(description)>6 ;"
        cursor.execute(sql)
        results = cursor.fetchall()
    finally:
        cursor.close()  # release the cursor even if the query fails
    return [list(result) for result in results]


def get_time():
    """Return the current local time as a log prefix, e.g. '2024-01-02 03:04:05| '."""
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    return stamp + "| "




# Known spellings of the organization.  The long forms are stored lowercase
# for case-insensitive matching; the acronym is appended afterwards.
# NOTE(review): 'USTR' stays upper-case, so lowercase comparisons elsewhere
# (e.g. the lowered mention keys) will never match it — confirm intended.
ins_names = [full.lower() for full in ['United States Trade Representative', 'USTradeRep']]
ins_names.append('USTR')



def extract(schema, strs, description):
    """Post-process a PaddleNLP UIE result into a deduplicated list of entities.

    Args:
        schema: entity-type keys to collect (e.g. 'organization', 'media').
        strs: Taskflow output — a one-element list whose dict maps each found
            key to a list of {'text', 'start', 'end', 'probability'} spans.
        description: the source text the spans were extracted from; used to
            decide whether nearby spans are separate mentions (looks for 'and').

    Returns:
        Unordered (set-deduplicated) list of the entity strings kept after
        probability filtering and abbreviation merging.
    """
    key = list(strs[0].keys())
    infos = []  # entity strings collected across all schema fields

    for i in range(len(schema)):
        if schema[i] not in key:
            continue
        institution = [0]  # indices into `new` of the spans to keep
        temp = []
        all_upper = []     # fully upper-case spans (likely acronyms) are always kept
        # Order this field's spans by their start offset in the text.
        resort = sorted(strs[0][schema[i]], key=lambda x: x['start'])

        # 'organization' spans below 0.71 probability are treated as noise.
        if schema[i] == 'organization':
            for org in range(len(resort) - 1, -1, -1):
                if resort[org]['probability'] < 0.71:
                    del resort[org]
            if not resort:
                continue

        new = [resort[0]]
        for j in range(0, len(resort)):  # merge abbreviations into their long form
            strss = new[-1]['text']
            if re.search(r"\s", new[-1]['text']):
                # Initials of the previously kept span, to compare against acronyms.
                strss = "".join([word[0] for word in new[-1]['text'].split()])
            if resort[j]['text'].isupper():
                all_upper.append(resort[j]['text'])
            if (difflib.SequenceMatcher(None, resort[j]['text'], strss).ratio() > 0.6
                    or difflib.SequenceMatcher(None, resort[j]['text'], new[-1]['text']).ratio() > 0.6):
                # Same entity: keep whichever surface form is longer.
                if len(resort[j]['text']) > len(new[-1]['text']):
                    new[-1] = resort[j]
            else:
                new.append(resort[j])

        var = new[0]
        for m in range(0, len(new)):
            # Far apart in the text, or joined by 'and': a separate mention.
            if (new[m]['start'] - var['end']) > 5 or 'and' in description[var['end']:var['end'] + 5]:
                institution.append(m)
                var = new[m]
            else:
                # Close together: overwrite the last kept index.
                institution[-1] = m
        for k in institution:
            temp.append(new[k]['text'])
        temp += all_upper
        infos.extend(temp)
    return list(set(infos))  # deduplicate across fields

def cope_accounts(account):
    """Decode one DB row's JSON fields and collect mention values matching ins_names.

    Args:
        account: row as [id, name_json, username, description_json,
            urls_json, mentions_json] (string fields are JSON-encoded).

    Returns:
        (description, mentions_infos): the decoded description string and the
        list of mention values whose joined key matches a known org spelling.
    """
    description = json.loads(account[3])  # profile bio
    urls = json.loads(account[4])  # decoded but currently unused (URL matching disabled)
    # Repair truncated mentions JSON of the form '[{"key": "value' (missing '"}]').
    if account[5].startswith('[{') and not account[5].endswith('}]'):
        mentions = json.loads(account[5] + '"}]')
    else:
        mentions = json.loads(account[5])  # mentioned accounts
    name = json.loads(account[1])  # decoded but currently unused here

    mentions_infos = []
    if mentions:
        try:
            for mention in mentions[:]:
                # A mention dict's joined keys form the mentioned username.
                if ''.join(mention.keys()).lower() in ins_names:
                    mentions_infos.extend(list(mention.values()))
        except Exception:  # malformed mention entry: best-effort, skip row's mentions
            print('error')
    return description, mentions_infos


# Entity types the UIE model should extract from profile descriptions.
schema=['institution','agency','organization','media']
# English UIE model; loading the Taskflow at import time is slow but the
# instance is shared by every row processed below.
ie = Taskflow('information_extraction', schema=schema, model='uie-base-en')

def start_working():
    """Classify each profile row and append confirmed matches to the output CSV.

    A row is confirmed when a known organization spelling appears in its
    description (and is validated by the UIE extraction) or among its mentions.
    Side effects: closes the module-level DB connection after reading all rows,
    and appends confirmed rows to `filename_output`.
    """
    accounts = read_from_mysql()
    print(len(accounts))
    db.close()  # all rows are already in memory; connection no longer needed
    count = 0
    for account in accounts:
        count += 1
        description, mentions_info = cope_accounts(account)
        institution = ''
        for ins_name in ins_names:
            if ins_name in description:
                description_info = ie(description)
                if description_info != [{}]:
                    extracted = extract(schema, description_info, description)
                    # Confirm the spelling actually appears in an extracted entity.
                    if any(ins_name in entity.lower() for entity in extracted):
                        institution = ins_name
                    # Extraction ran for this description: stop trying spellings.
                    break
            elif mentions_info and ins_name in mentions_info:
                institution = ins_name
                break

        if institution:
            with open(filename_output, 'a', encoding='utf-8-sig') as name:
                writer = csv.writer(name, lineterminator='\n')
                writer.writerow([account[0] + '\t', json.loads(account[1]),
                                 account[2], institution, account[3]])
            print(get_time() + "第 " + str(count) + " 个账号处理完毕,是确定账号")
        else:
            print(get_time() + "第 " + str(count) + " 个账号处理完毕，不是确定账号")


start_working()
