import torch
import pickle as pkl
import pandas as pd
import regex as re
import time as time
from importlib import import_module
import pandas as pd
import pymysql
import sqlalchemy
from urllib import parse
import numpy as np
import sys
import random

def classification_model(project_lst, loan_type):
    """Run the end-to-end green-loan classification for the given projects.

    Pipeline: fetch feature rows from MySQL -> load the TextCNN model chosen
    by *loan_type* -> build the dataset -> predict top-5 classes per row ->
    apply the rule-based strategy -> write the result back to the database.

    Parameters
    ----------
    project_lst : list
        Project uuids used to filter green_identify_model_input.
    loan_type : str
        '1' = capital loan, '2' = project loan (validated by Parameters).

    Returns
    -------
    pandas.DataFrame
        Final result frame, one row per project.

    Raises
    ------
    Exception
        When the fetched feature table is empty or loan_type is invalid.
    """
    # --- read input data --------------------------------------------------
    # NOTE(review): the literal string comparisons below are leftover dev
    # switches — the MySQL branch always runs, the local-excel branch never.
    if 'mysql' == 'mysql':
        engine, cursor = creat_engine()
        df = ReadSql(project_lst, engine)
    if '本地' == '0':
        df = pd.read_excel(r'./THUCNews/data/Data1.xlsx',sheet_name='Sheet2')
    eval_lst = []  # industry whitelist; empty disables the industry filter

    # guard: an empty feature table means there is nothing to classify
    if len(df) == 0:
        # BUG FIX: Exception() takes no file= keyword argument; the original
        # call raised TypeError instead of delivering this message.
        raise Exception('特征表为空，请检查')

    # --- resolve run parameters from the loan type ------------------------
    param = Parameters(loan_type)
    dataset = param.dataset
    model_name = param.model_name
    embedding = param.embedding
    loan_type = param.loan_type
    capital = param.capital
    project = param.project
    print(capital,project)
    standard = param.standard
    dic_code_map = param.dic_code_map

    # --- load the trained model -------------------------------------------
    model, config = load_model(dataset, model_name, embedding=embedding)
    model.eval()
    # keep an untouched copy for the rule stage before filling NaN purposes
    data = df.copy()
    df['loan_purpose'] = df['loan_purpose'].fillna('非绿')

    print('')
    print('开始构建数据集', file=sys.stderr)
    result = make_data(config, df['loan_purpose'].values)  # tokenize + pad texts
    print('数据已经构建完毕', file=sys.stderr)

    print('')
    print('开始预测', file=sys.stderr)
    lst = predict(result, model)  # top-5 class ids per row
    print('预测结束', file=sys.stderr)
    print('')

    print('')
    print('开始生成top5', file=sys.stderr)
    result = output_top5(df, lst, dic_code_map)  # map ids to class names
    print('top5已生成', file=sys.stderr)
    print('')

    # --- rule-based post-processing ---------------------------------------
    if project:
        # df_reg = ReModel(data, standard, dic_code_map, standard_id=4, reduce=False)
        # result = ProjectRule(result, df_reg)
        result = CapitalRule(result, data, path=r'./THUCNews/data/')
        result['none_result'] = '0'
        print('项目贷策略已执行完毕', file=sys.stderr)
        print('')
    if capital:
        print('执行流贷策略', file=sys.stderr)
        result = CapitalRule(result, data, path=r'./THUCNews/data/')
        result['none_result'] = '0'
        # result = NoneResult(result,eval_lst)
        print('流贷策略执行完毕', file=sys.stderr)
        print('')

    if '行业召回策略' == '0':  # industry-recall strategy, currently disabled
        recommend = pd.read_excel('./recall_table.xlsx')
        result = recall(result,data,recommend)

    # --- finalize and persist ---------------------------------------------
    if 'industry_name' in result.columns:
        del result['industry_name']
    result['loan_type'] = loan_type
    result.to_excel('./ClassResult.xlsx',index=False)
    result = result[['project_uuid','class_top1','class_top2','class_top3','class_top4','class_top5','loan_type','none_result','identify_time']]
    #DelSql(project_lst, cursor)
    print(result.head())
    ToSql(result, engine)
    print('')
    print('------------------识别已经结束,结果已返回------------------', file=sys.stderr)
    print('结束时间----',time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), file=sys.stderr)
    print('')

    return result







def recall(result, data, recommend):
    """Industry-based recall: flag short, non-green purposes as '疑似'.

    A row is recalled when (1) the NLP top-1 class is '非绿', (2) its loan
    purpose collapses to <=4 chars once boilerplate words are stripped, and
    (3) its industry appears in the *recommend* table.  Recalled rows get
    class_top1='疑似' plus up to three '|'-joined recommend ids for their
    industry; all other rows get an empty recommend_id.

    Parameters
    ----------
    result : pandas.DataFrame
        Needs 'project_uuid', 'class_top1', 'industry_name'.
    data : pandas.DataFrame
        Needs 'project_uuid', 'loan_purpose'.
    recommend : pandas.DataFrame
        Needs 'industry', 'recommend_id'.

    Returns
    -------
    pandas.DataFrame
        *result* with a 'recommend_id' column (and without 'loan_purpose').
    """
    result = pd.merge(result, data[['project_uuid', 'loan_purpose']])
    cond1 = result.class_top1 == '非绿'
    cond2 = result.loan_purpose.str.replace('符合绿色融资|贷款用于|采购|贷款用于|购|进|付|款|买|费用|原材料|原料|，|,|。|\.|\(|\)|（|）| ','',regex=True).str.len() <= 4
    cond3 = result.industry_name.isin(list(recommend.industry.unique()))
    cond = (cond1) & (cond2) & (cond3)
    cond_df = result.loc[cond]
    print('命中的数据',cond_df)
    del result['loan_purpose']
    if len(cond_df) > 0:
        # BUG FIX: this .loc slice was mutated in place below, triggering
        # SettingWithCopy behaviour; take an explicit copy before assigning.
        rc = recommend.loc[recommend.industry.isin(list(cond_df.industry_name.unique()))].copy()
        print(rc)
        rc['recommend_id'] = rc.recommend_id.astype('str')
        try:
            # sample 3 recommend ids per industry; random.sample raises
            # ValueError when a group is too small, so fall back to 1 below
            rec_ids = rc[['recommend_id','industry']].groupby('industry').recommend_id.agg(lambda x: '|'.join(random.sample(x.to_list(),3))).reset_index()
        except Exception as exp:
            print(exp,'---------------------------------------')
            rec_ids = rc[['recommend_id','industry']].groupby('industry').recommend_id.agg(lambda x: '|'.join(random.sample(x.to_list(),1))).reset_index()
        # renamed from `id` — that name shadowed the builtin
        rec_ids.columns = ['industry_name','recommend_id']
        print(rec_ids)
        cut_hit = result.loc[cond]
        cut_miss = result.loc[~cond]
        cut_hit = pd.merge(cut_hit, rec_ids, on='industry_name', how='left')
        cut_hit['class_top1'] = '疑似'
        result = pd.concat([cut_hit, cut_miss])
    if 'recommend_id' not in list(result.columns):
        result['recommend_id'] = ''
    return result



def CapitalRule(result,data,path = r'./THUCNews/data/'):
        """Apply the regex rule tables on top of the NLP classification.

        Reads four rule workbooks from *path* (neg/gb/book/pos), tags each
        loan_purpose in *data* that matches, then lets the rule verdict
        override the model's class_top1.  Later assignments win, so the
        effective priority is: pos3 > pos2 > pos > neg > length rule.

        NOTE(review): the whole body sits in a broad try/except that only
        prints the error — any rule-table problem silently skips this stage
        and returns *result* unchanged.  Also note the except-branch banner
        contains a typo ('请假查') — runtime string, left as-is.
        """
        try:
            # length rule: purposes that shrink to <=4 chars after stripping
            # boilerplate words/punctuation default to non-green
            data.loc[data.loan_purpose.str.replace('符合绿色融资|贷款用于|采购|贷款用于|购|进|付|款|买|费用|原材料|原料|，|,|。|\.|\(|\)|（|）| ','',regex=True).str.len()<=4,'neg_len'] = '非绿'
            print('读取策略表')
            df_neg = pd.read_excel(path + 'neg.xlsx', sheet_name='Sheet2')
            df_gb = pd.read_excel(path + 'gb.xlsx')
            df_book = pd.read_excel(path + 'book.xlsx')
            df_pos = pd.read_excel(path + 'pos.xlsx')
            print('')
            print('————————————————————————开始正则施加流贷策略————————————————————————')
            # non-green rule: any matching pattern forces a '非绿' verdict
            # (the always-true literal comparison is a leftover dev switch)
            if '绿色产品' == '绿色产品':
                for pat in df_neg.pattern.values:
                    data.loc[data.loan_purpose.str.contains(fr'{pat}',regex=True),'neg'] = '非绿'

            # green rule: matching pattern assigns the class from column Y
            if '绿色产品' == '绿色产品':
                for pat in df_pos.pattern.values:
                    # value= df_pos.loc[df_pos.pattern == pat, 'Y'].values[0]
                    data.loc[data.loan_purpose.str.contains(pat,regex=True),'pos'] =  df_pos.loc[df_pos.pattern == pat, 'Y'].values[0]

            # gb table rule: same mechanism, higher priority than pos
            for pat in df_gb.pattern.values:
                data.loc[data.loan_purpose.str.contains(pat, regex=True), 'pos2'] = df_gb.loc[df_gb.pattern == pat, 'Y'].values[0]

            # book table rule: highest priority
            for pat in df_book.pattern.values:
                data.loc[data.loan_purpose.str.contains(pat, regex=True), 'pos3'] = df_book.loc[df_book.pattern == pat, 'Y'].values[0]
            print('正则完毕')
            # fold the rule columns into one verdict; later writes override
            data['rule_result'] = data.neg_len
            data.loc[~data.neg.isna(),'rule_result'] = data.neg
            data.loc[~data.pos.isna(),'rule_result'] = data.pos
            data.loc[~data.pos2.isna(),'rule_result'] = data.pos2
            data.loc[~data.pos3.isna(),'rule_result'] = data.pos3
            print('施加完毕')
            print('')
            result = pd.merge(data[['project_uuid','rule_result']],result,on='project_uuid')
            # result.to_excel('./rule_result.xlsx',index=False)
            # rows with no rule verdict fall back to the model's top-1 class
            result.loc[result.rule_result.isna(),'rule_result'] = result.class_top1
            del result['class_top1']
            result = result.rename(columns={'rule_result':'class_top1'})
        except Exception as exp:
            print('')
            print('**************************************策略模块失效请假查********************************************')
            print(exp)
        return result



def NoneResult(result, eval_lst):
    """Mark which rows should be evaluated: '0' = evaluate, '1' = skip.

    An empty *eval_lst* means every industry is evaluated.
    """
    if not len(eval_lst):
        result['none_result'] = '0'
        return result
    in_scope = result.industry_name.isin(eval_lst)
    result.loc[in_scope, 'none_result'] = '0'   # whitelisted -> evaluate
    result['none_result'] = result.none_result.fillna('1')  # rest -> skip
    return result



class Parameters(object):
    """Run configuration derived from the loan type.

    loan_type '1' selects the capital-loan TextCNN, '2' the project-loan
    TextCNN; any other value is rejected.  Also loads the class-code map
    and the regex standard table from CSV on construction.
    """
    def __init__(self, loan_type):
        self.loan_type = loan_type
        # both flags must start False; exactly one is switched on below
        self.project = False
        self.capital = False
        self.dataset = 'THUCNews'
        self.dic_code_map = pd.read_csv('THUCNews/data/class.csv')  # code -> class-name map
        self.standard = pd.read_csv('THUCNews/data/loan.csv')       # regex rule table
        if self.loan_type == '1':
            print('**************************此次识别为流贷识别**************************', file=sys.stderr)
            print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime()), file=sys.stderr)
            self.model_name = 'TextCNN_capital_loan'
            self.embedding = 'embedding_SougouNews_capital.npz'
            self.capital = True
        elif self.loan_type == '2':
            print('**************************此次识别为项目贷识别**************************', file=sys.stderr)
            print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), file=sys.stderr)
            self.model_name = 'TextCNN_project_loan'
            self.embedding = 'embedding_SougouNews_project.npz'
            self.project = True
        else:
            # BUG FIX: Exception() takes no file= keyword argument; the
            # original call raised TypeError instead of this message.
            raise Exception('参数错误')



def load_model(dataset, model_name, embedding = 'embedding_SougouNews.npz'):
    """Import the model module by name, build it and load its trained weights.

    Returns the (model, config) pair; the model is moved to config.device.
    """
    print('')
    print('开始加载nlp模型', file=sys.stderr)
    module = import_module('models2.' + model_name)
    cfg = module.Config(dataset, embedding)
    net = module.Model(cfg).to(cfg.device)
    net.load_state_dict(torch.load(cfg.save_path))
    print('模型加载已完成', file=sys.stderr)
    print('')
    return net, cfg

def ReModel(data, standard, dic_code_map, standard_id=4, reduce=False):
    """Run the regex-rule model and expand its (up to 3) category codes.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain 'loan_purpose' and 'project_uuid'.
    standard : pandas.DataFrame
        Rule table with 'reg_exr' (regex) and 'catalogue_id' columns.
    dic_code_map : pandas.DataFrame
        Mapping table with 'y_code' -> 'y' (category name).
    standard_id : int, optional
        Identifier written into every output row.
    reduce : bool, optional
        If True, keep only the rules listed as effective in find_rule.xlsx.

    Returns
    -------
    pandas.DataFrame
        GI_model output with 'out'/'out2'/'out3' mapped to category names
        ('非绿' for anything unmatched or unmappable).
    """
    if reduce == True:  # optionally slice the rule table down to vetted rules
        df_find_rule = pd.read_excel('THUCNews/data/find_rule.xlsx',sheet_name='有效策略')
        standard = standard.loc[standard.catalogue_id.isin(df_find_rule.Y.values)]
        print('切片已完成', file=sys.stderr)
    print('')
    print('正在进行正则模型的绿色识别，请等待->->->->-----------------', file=sys.stderr)
    result = GI_model(data,standard,standard_id)    # run the regex matcher
    print('正则模型识别完毕', file=sys.stderr)
    print('')
    # BUG FIX: Series.fillna(inplace=True) on a column selection is chained
    # assignment (deprecated; a no-op under pandas copy-on-write) — assign
    # the filled series back instead.  211 is the non-green code.
    result['catalogueId'] = result['catalogueId'].fillna('211')

    # split the (possibly list-like) code string into up to three columns
    print('')
    print('开始分裂正则模型的多个结果', file=sys.stderr)
    result.catalogueId = result.catalogueId.astype('str')
    temp = result.catalogueId.str.replace(r'[\[\]\']','',regex=True).str.replace(' ','').str.split(',',expand=True).iloc[:,:3].astype('str')
    # pad to exactly three columns when fewer codes were produced
    if temp.shape[1] == 1:
        temp['temp1'] = np.nan
        temp['temp2'] = np.nan
    if temp.shape[1] == 2:
        temp['temp3'] = np.nan
    result[['out','out2','out3']] = temp
    print('分裂完毕')
    print('')
    print('开始映射code为分类目录', file=sys.stderr)
    dic_code_map = dic_code_map.astype('str')
    dic_code = dic_code_map.set_index('y_code').y

    result.out = result.out.map(dic_code)
    result.out2 = result.out2.map(dic_code)
    result.out3 = result.out3.map(dic_code)

    # anything that failed to map counts as non-green
    result.out = result.out.fillna('非绿')
    result.out2 = result.out2.fillna('非绿')
    result.out3 = result.out3.fillna('非绿')
    print('映射目录转换完毕', file=sys.stderr)
    print('')
    print('正则结果已经输出', file=sys.stderr)
    print('')
    return result


def ProjectRule(result, df_reg):
    """Overlay the regex-model outputs onto the NLP top-3 columns.

    Wherever the regex model produced something other than '非绿', its
    verdict replaces the corresponding class_top column (regex wins).
    """
    print('')
    print('开始对nlp模型施加策略', file=sys.stderr)
    merged = pd.merge(result, df_reg[['project_uuid', 'out', 'out2', 'out3']],
                      on='project_uuid', how='left')
    for src, dst in (('out', 'class_top1'), ('out2', 'class_top2'), ('out3', 'class_top3')):
        overridden = merged[src] != '非绿'
        merged.loc[overridden, dst] = merged[src]
    keep = ['project_uuid', 'class_top1', 'class_top2', 'class_top3',
            'class_top4', 'class_top5', 'identify_time']
    merged = merged[keep]
    print('策略处理已完成', file=sys.stderr)
    print('')
    return merged

def GI_model(data, standard, standard_id):
    """Regex-match every loan_purpose text against the rule table.

    For each row of *data*, every rule in *standard* is tried; matches are
    collected and summarized per project:
      greenFlag '0' = no rule matched, '1' = exactly one, '2' = more than
      one (catalogueId then becomes a de-duplicated list of the first 3).

    Returns
    -------
    pandas.DataFrame
        Columns: standardId, project_uuid, catalogueId, greenFlag.
    """
    columns = ['standardId', 'project_uuid', 'catalogueId', 'greenFlag']
    rows = []
    for text, uuid in zip(data['loan_purpose'].values, data['project_uuid'].values):
        record = {
            'standardId': f'{standard_id}', 'project_uuid': uuid
        }
        matches = []
        for i in range(standard.shape[0]):
            expr = standard.iloc[i]['reg_exr']             # rule regex
            catalog_id = standard.iloc[i]['catalogue_id']  # its category
            if re.search(expr, text):
                # store the last-matched category; snapshot it per match
                record['catalogueId'] = str(catalog_id)
                matches.append(record.copy())
        if len(matches) == 0:
            record['greenFlag'] = '0'
        if len(matches) >= 1:
            record['greenFlag'] = '1'
        if len(matches) > 1:
            record['greenFlag'] = '2'
            record['catalogueId'] = list(set(m['catalogueId'] for m in matches[0:3]))
        rows.append(record)
    # PERF FIX: build all rows first and create the frame once — the original
    # pd.concat inside the loop was quadratic in the number of projects.
    return pd.DataFrame(rows, columns=columns).reset_index(drop=True)



def make_data(config, f, pad_size=20):
    """Turn raw texts into (token-id vector, label), seq_len samples.

    Each text is character-tokenized, padded/truncated to *pad_size*, and
    mapped through the pickled vocab at config.vocab_path; unknown chars map
    to the '<UNK>' index.  Empty (whitespace-only) texts are skipped.

    Parameters
    ----------
    config : object
        Must expose a 'vocab_path' attribute pointing at a pickled dict.
    f : iterable of str
        The texts to encode.
    pad_size : int, optional
        Fixed output length per sample.

    Returns
    -------
    list
        [((word_ids, label), seq_len), ...] with label always 0 here.
    """
    tokenizer = lambda x: [y for y in x]
    UNK, PAD = '<UNK>', '<PAD>'
    contents = []
    # BUG FIX: the vocab file handle was opened without ever being closed;
    # use a context manager so it is released deterministically.
    with open(config.vocab_path, 'rb') as vocab_file:
        vocab = pkl.load(vocab_file)

    for line in f:
        lin = line.strip()
        if not lin:
            continue
        content = lin
        label = 0  # dummy label; prediction ignores it

        words_line = []

        token = tokenizer(content)
        seq_len = len(token)  # original (pre-padding) length
        # pad short texts / truncate long ones to a fixed length
        if len(token) < pad_size:
            token.extend([PAD] * (pad_size - len(token)))
        if len(token) > pad_size:
            token = token[:pad_size]
            seq_len = len(token)

        # vocab lookup; unknown characters map to the <UNK> index
        for word in token:
            words_line.append(vocab.get(word, vocab[UNK]))

        contents.append(((words_line, int(label)), seq_len))
    return contents

def predict(result, model):
    """Run *model* over the prepared samples and collect top-5 class ids.

    *result* is the output of make_data; each prediction is a list of the
    five highest-scoring class indices for that sample.
    """
    predictions = []
    for sample in result:
        features = sample[0]
        ids = torch.LongTensor([features[0]])
        labels = torch.LongTensor([features[1]])
        out = model((ids, labels))
        _, top5 = torch.topk(out, k=5)
        predictions.append(top5[0].tolist())
    return predictions


def output_top5(df, lst, dic_code_map):
    """Map the five predicted class codes to names and attach them to *df*.

    *lst* is a list of [top1..top5] code lists (one per row of *df*);
    *dic_code_map* provides the y_code -> y name mapping.  Adds an
    'identify_time' timestamp column.
    """
    print('')
    print('开始映射输出top5结果', file=sys.stderr)
    dft = pd.DataFrame(lst, columns=['out', 'out2', 'out3', 'out4', 'out5'])
    code_to_name = dic_code_map.set_index('y_code').y.to_dict()
    mapped = [dft[col].map(code_to_name) for col in ('out', 'out2', 'out3', 'out4', 'out5')]
    result = pd.concat([df] + mapped, axis=1)
    print('输入表输出表合并已完成', file=sys.stderr)
    result = result[['project_uuid', 'industry_name', 'out', 'out2', 'out3', 'out4', 'out5']]
    result.columns = ['project_uuid', 'industry_name', 'class_top1', 'class_top2',
                      'class_top3', 'class_top4', 'class_top5']
    result['identify_time'] = time.strftime('%Y%m%d %H:%M:%S', time.localtime())
    print('top5映射分类已生成', file=sys.stderr)
    return result



def read_sql_param(path='sql_param.txt'):
    """Load DB connection settings from a one-row CSV file.

    Returns (host, user, passwd, port, schema_database) with port as int
    and everything else as str.
    """
    print('')
    print('开始读取数据库参数', file=sys.stderr)
    params = pd.read_csv(path)
    row = params.iloc[0]
    print('读取完毕', file=sys.stderr)
    print('')
    return (str(row['host']), str(row['user']), str(row['passwd']),
            int(row['port']), str(row['schema_database']))

def creat_engine():
    """Create both a SQLAlchemy engine and a raw pymysql cursor.

    Connection parameters come from read_sql_param().  Returns the tuple
    (engine, cursor) on success.

    NOTE(review): on failure this returns the single value 0, but the only
    visible caller unpacks two values (`engine, cursor = creat_engine()`),
    so a connection error surfaces as a TypeError at the call site — confirm
    and consider raising instead of returning 0.
    """
    host, user, passwd, port, schema_database = read_sql_param()
    # URL-encode the password so special characters survive the DSN string
    pwd = parse.quote_plus(passwd)
    try:
        engine = sqlalchemy.create_engine(f'mysql+pymysql://{user}:{pwd}@{host}:{port}/{schema_database}?charset=utf8')
        print('连库成功1', file=sys.stderr)
        db = pymysql.connect(host=host, port=port, user=user,passwd=passwd, database=schema_database)
        cursor = db.cursor()
        print('连库成功2', file=sys.stderr)
        return engine, cursor
    except Exception as exp:
        print('连接数据库失败：',exp, file=sys.stderr)
        return 0


def ReadSql(lst, engine):
    """Fetch input rows for the given project uuids.

    NOTE(review): the uuid list is interpolated directly into the SQL text;
    acceptable only for trusted internal ids — not safe against injection.
    """
    # "['a', 'b']" -> "('a', 'b')" so it can be used as an IN (...) list
    project_id = '(' + str(lst).replace('[','').replace(']','') + ')'

    sql = '''
          select  * from green_identify_model_input
          where project_uuid in {}  
          '''.format(project_id)

    return pd.read_sql(sql, engine)



def ToSql(df, engine):
    """Append *df* to green_identify_model_output and log the row count."""
    print('')
    print('开始写入数据库', file=sys.stderr)
    # only the schema name is needed here; the engine already holds the rest
    *_, schema_database = read_sql_param()
    rows = df.to_sql('green_identify_model_output', con=engine,
                     schema=schema_database, if_exists='append', index=False)
    print('已经向green_identify_model_output中写入{}行'.format(rows), file=sys.stderr)


def DelSql(lst, cursor):
    """Delete previous output rows for the given project uuids, then commit.

    Closes the cursor when done.  NOTE(review): uuids are interpolated into
    the SQL text — trusted internal ids only.
    """
    print('')
    print('开始删除数据', file=sys.stderr)
    # "['a', 'b']" -> "('a', 'b')" for the IN (...) clause
    project_uuid = '(' + str(lst).replace('[','').replace(']','') + ')'
    delsql = '''
    delete from green_identify_model_output
    where project_uuid in {}
    '''.format(project_uuid)
    cursor.execute(delsql)
    cursor.execute('commit')
    cursor.close()
    print('删除成功', file=sys.stderr)

