import logging
from logzero import logger
import jieba
jieba.setLogLevel(logging.INFO)
import pandas as pd
import os
from tqdm import tqdm
import pdb
import re 
import pickle
import json
from datautil.util import textClean,VocabCLS,Example,DatasetIterater
from model import TextCNN
import numpy as np
import torch
from datautil.parameter import args
from datautil.plot import plotHeatMap


# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

def _cleanRows(frame, desc):
    """Clean and tokenize every row of *frame*; return (sentences, labels, pois).

    Expects columns ``content``, ``label`` and ``poi`` — TODO confirm
    against the CSV schema.
    """
    sentences, labels, pois = [], [], []
    for _, line in tqdm(frame.iterrows(), desc=desc, total=frame.shape[0], ncols=85):
        sentences.append(textClean(line.content))
        labels.append(line.label)
        pois.append(line.poi)
    return sentences, labels, pois


def _makeExamples(sentids, labels, pois, desc):
    """Zip parallel id/label/poi lists into a list of Example objects."""
    examples = []
    for nid, (ids, label, poi) in tqdm(enumerate(zip(sentids, labels, pois)), desc=desc, total=len(labels), ncols=85):
        examples.append(Example(nid=nid, ids=ids, label=label, poi=poi))
    return examples


def cleanForTrain(args, trainFn=None, devFn=None):
    """Preprocess the train/dev CSVs into cached ``Example`` lists.

    Builds the vocabulary from the training sentences, converts both splits
    to token-id sequences, and writes ``train.cache``, ``dev.cache``,
    ``vocab.bin`` and ``data.json`` (maxSeqLen/vocabSize) under
    ``args.processedPath``.

    Args:
        args: namespace with ``sourcePath`` and ``processedPath``.
        trainFn: optional path to the training CSV; defaults to
            ``<sourcePath>/train.csv``.
        devFn: optional path to the dev CSV; defaults to
            ``<sourcePath>/dev.csv``.
    """
    vocab = VocabCLS()
    # BUG FIX: compare against None with `is`, not `==` (PEP 8).
    if trainFn is None:
        trainset = pd.read_csv(os.path.join(args.sourcePath, "train.csv"))
    else:
        trainset = pd.read_csv(trainFn)

    if devFn is None:
        devset = pd.read_csv(os.path.join(args.sourcePath, "dev.csv"))
    else:
        devset = pd.read_csv(devFn)

    sentences, labels, pois = _cleanRows(trainset, "进行训练集文本清洗和分词")

    # Vocabulary is built from the training split only.
    for sent in tqdm(sentences, desc="构建字典", ncols=85):
        vocab.addSentence(sent)
    vocab.trim()  # presumably prunes rare words — confirm in VocabCLS

    sentids = []
    for sent in tqdm(sentences, desc="训练集转为词标", ncols=85):
        sentids.append(vocab.sentence2idx(sent))

    # Stats persisted for model construction at train/predict time.
    maxSeqLen = max(len(sent) for sent in sentids)
    vocabSize = vocab.n_words

    examples = _makeExamples(sentids, labels, pois, "生成训练集样本数据")

    os.makedirs(args.processedPath, exist_ok=True)
    with open(os.path.join(args.processedPath, "train.cache"), "wb") as f:
        pickle.dump(examples, f)
    logger.info(f"处理后的训练集保存在：{os.path.join(args.processedPath,'train.cache')}")

    with open(os.path.join(args.processedPath, "data.json"), "w") as f:
        json.dump(
            {
                "maxSeqLen": maxSeqLen,
                "vocabSize": vocabSize,
            }, f
        )

    with open(os.path.join(args.processedPath, "vocab.bin"), "wb") as f:
        pickle.dump(vocab, f)

    # Dev split reuses the frozen training vocabulary (no new words added).
    sentences, labels, pois = _cleanRows(devset, "进行验证文本清洗和分词")

    sentids = []
    for sent in tqdm(sentences, desc="验证集转为词标", ncols=85):
        sentids.append(vocab.sentence2idx(sent))

    examples = _makeExamples(sentids, labels, pois, "生成验证集样本数据")

    with open(os.path.join(args.processedPath, "dev.cache"), "wb") as f:
        pickle.dump(examples, f)
    logger.info(f"处理后的验证集保存在：{os.path.join(args.processedPath,'dev.cache')}")


def doTrain(args):
    """Train a TextCNN classifier on the cached train/dev examples.

    Loads the caches written by ``cleanForTrain``, runs ``args.num_epochs``
    epochs of Adam optimization, evaluates on the dev set after every epoch,
    and saves the final weights to ``<savePath>/model.bin``.
    """
    with open(os.path.join(args.processedPath, "train.cache"), "rb") as f:
        trainset = pickle.load(f)
    with open(os.path.join(args.processedPath, "dev.cache"), "rb") as f:
        devset = pickle.load(f)
    with open(os.path.join(args.processedPath, "data.json"), "r") as f:
        dataconfig = json.load(f)

    trainiter = DatasetIterater(trainset, args.batch_size)
    deviter = DatasetIterater(devset, args.batch_size)

    # 4 = number of target classes — TODO confirm against the label set.
    modelConfig = TextCNN.Config(dataconfig["vocabSize"], 4)
    model = TextCNN.Model(modelConfig).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    for epoch in range(args.num_epochs):
        model.train()
        trLoss, trAcc, trPre, trRec = [], [], [], []
        for batch in tqdm(trainiter, desc=f"Train_{epoch}", ncols=85):
            optimizer.zero_grad()
            nids, inputIds, labels, pois = batch
            inputIds = inputIds.to(device)
            labels = labels.to(device)
            # Model returns the loss plus batch metrics when labels are given.
            loss, acc, precision, recall = model(inputIds, labels)
            trLoss.append(loss.item())
            trAcc.append(acc)
            trPre.append(precision)
            trRec.append(recall)
            loss.backward()
            optimizer.step()

        logger.info(f"Training: Epoch {epoch}, Loss {np.average(trLoss):.4f}, Acc {np.average(trAcc):.4f}, Precison {np.average(trPre):.4f}, Recall {np.average(trRec):.4f}")

        model.eval()
        tsLoss, tsAcc, tsPre, tsRec = [], [], [], []
        # BUG FIX: progress bar was mislabeled "Train_{epoch}" on the dev
        # pass; also disable autograd during evaluation — gradients are
        # never used here and tracking them wastes time and memory.
        with torch.no_grad():
            for batch in tqdm(deviter, desc=f"Eval_{epoch}", ncols=85):
                nids, inputIds, labels, pois = batch
                inputIds = inputIds.to(device)
                labels = labels.to(device)
                loss, acc, precision, recall = model(inputIds, labels)
                tsLoss.append(loss.item())
                tsAcc.append(acc)
                tsPre.append(precision)
                tsRec.append(recall)
        logger.info(f"Testing: Epoch {epoch}, Loss {np.average(tsLoss):.4f}, Acc {np.average(tsAcc):.4f}, Precison {np.average(tsPre):.4f}, Recall {np.average(tsRec):.4f}")

    os.makedirs(args.savePath, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(args.savePath, 'model.bin'))
    logger.info(f"模型保存在：{os.path.join(args.savePath,'model.bin')}")

def cleanForPred(args, predFn):
    """Clean/tokenize the prediction CSV with the saved vocabulary and cache it.

    Reads ``predFn``, converts each row's content to token ids via the
    pickled vocabulary, and writes the resulting Example list to
    ``<processedPath>/pred.cache``.
    """
    logger.info("读取数据......")
    frame = pd.read_csv(predFn)

    texts, ys, poi_list = [], [], []
    for _, row in tqdm(frame.iterrows(), desc="进行预测文本清洗和分词", total=frame.shape[0], ncols=85):
        texts.append(textClean(row.content))
        ys.append(row.label)
        poi_list.append(row.poi)

    logger.info("加载词典......")
    with open(os.path.join(args.processedPath, "vocab.bin"), "rb") as f:
        vocab = pickle.load(f)

    # Token ids come from the training-time vocabulary — no new words added.
    id_seqs = [vocab.sentence2idx(t) for t in tqdm(texts, desc="预测文本转为词标", ncols=85)]

    samples = [
        Example(nid=i, ids=ids, label=y, poi=p)
        for i, (ids, y, p) in tqdm(enumerate(zip(id_seqs, ys, poi_list)), desc="生成预测文本数据", total=len(ys), ncols=85)
    ]

    with open(os.path.join(args.processedPath, "pred.cache"), "wb") as f:
        pickle.dump(samples, f)
    logger.info(f"处理后的预测文本保存在：{os.path.join(args.processedPath,'pred.cache')}")



def doPred(args):
    """Run inference on the cached prediction set and fold it into risk scores.

    Loads the trained TextCNN, predicts a label for every cached example,
    writes the relabeled examples back to ``pred.cache`` (consumed later by
    ``updateData`` for self-training), and averages the per-poi predictions
    into the ``risk`` column of ``risk_last.csv``.
    """
    logger.info("加载模型......")
    with open(os.path.join(args.processedPath, "data.json"), "r") as f:
        dataconfig = json.load(f)
    modelConfig = TextCNN.Config(dataconfig["vocabSize"], 4)
    model = TextCNN.Model(modelConfig).to(device)
    # BUG FIX: map_location keeps GPU-trained checkpoints loadable on
    # CPU-only hosts (plain torch.load would raise there).
    model.load_state_dict(torch.load(os.path.join(args.savePath, 'model.bin'), map_location=device))

    logger.info("读取处理后的数据......")
    with open(os.path.join(args.processedPath, "pred.cache"), "rb") as f:
        predset = pickle.load(f)
    prediter = DatasetIterater(predset, args.batch_size)

    model.eval()
    nidList, poiList, labelList = [], [], []
    # BUG FIX: inference previously ran with autograd enabled; no gradients
    # are needed here.
    with torch.no_grad():
        for batch in tqdm(prediter, desc=f"预测数据", ncols=85):
            nids, inputIds, _, pois = batch
            inputIds = inputIds.to(device)
            # Called without labels the model presumably returns predicted
            # labels — TODO confirm against TextCNN.Model.forward.
            labels = model(inputIds)
            labelList.extend(labels.tolist())
            nidList.extend(nids)
            poiList.extend(pois)

    nid2label = dict(zip(nidList, labelList))

    # Average the predicted labels of all sentences belonging to one poi.
    poi2label = pd.DataFrame(zip(poiList, labelList), columns=["poi", "label"])
    poi2label = poi2label.groupby("poi").mean()

    # Write the predictions back into the cached examples so updateData can
    # merge them into the training set (self-training loop).
    newset = []
    for it in predset:
        it.label = nid2label[it.nid]
        newset.append(it)

    with open(os.path.join(args.processedPath, "pred.cache"), "wb") as f:
        pickle.dump(newset, f)

    # Prefer the previously-updated risk file so repeated runs accumulate.
    if os.path.exists(os.path.join(args.savePath, "risk_last.csv")):
        risk = pd.read_csv(os.path.join(args.savePath, "risk_last.csv"))
    else:
        risk = pd.read_csv(os.path.join(args.sourcePath, "risk.csv"))
    risk.index = risk.uid
    # Blend each poi's predicted label into its existing risk score
    # (simple 50/50 average — assumes poi keys match risk.uid; verify).
    for key, value in poi2label.to_dict()["label"].items():
        risk.loc[key, "risk"] = (risk.loc[key, "risk"] + value) / 2
    risk.to_csv(os.path.join(args.savePath, "risk_last.csv"), index=False, encoding="utf-8-sig")
    logger.info(f"预测结果保存在：{os.path.join(args.savePath,'risk_last.csv')}")
        

def updateData(args):
    """Append the relabeled prediction cache onto the cached training set.

    Implements the self-training step: examples relabeled by ``doPred`` are
    concatenated to ``train.cache`` in place.
    """
    logger.info("合并预测数据到训练集......")
    train_path = os.path.join(args.processedPath, "train.cache")
    pred_path = os.path.join(args.processedPath, "pred.cache")

    with open(train_path, "rb") as f:
        merged = pickle.load(f)
    with open(pred_path, "rb") as f:
        merged = merged + pickle.load(f)

    with open(train_path, "wb") as f:
        pickle.dump(merged, f)
    logger.info(f"更新后的数据保存在：{train_path}")


if __name__ == "__main__":
    # Supported actions: "clearnForTrain", "doTrain", "clearnForPred", "doPred", "update"
    # (the "clearn" spelling is what parameter.py expects — kept verbatim).
    handlers = {
        "clearnForTrain": lambda: cleanForTrain(args, args.trainFn, args.devFn),
        "doTrain": lambda: doTrain(args),
        "clearnForPred": lambda: cleanForPred(args, args.predFn),
        "doPred": lambda: doPred(args),
        "update": lambda: updateData(args),
        "doPlot": lambda: plotHeatMap(),
    }
    handler = handlers.get(args.action)
    if handler is not None:
        handler()