import argparse
import configparser
import os
from typing import List, Tuple

import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoTokenizer

from model_utils import build_model
from utils.set_seed import set_seed

# Pin GPU enumeration to PCI bus order and restrict visibility to GPU 1.
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = '1'  # GPU selection


# Directory containing this script. os.path.dirname is portable, unlike
# the previous manual split on '/' (which breaks on Windows paths).
cur = os.path.dirname(os.path.abspath(__file__))
# Shared configuration: model name, dataset paths, output paths, etc.
config = configparser.ConfigParser()
config.read(os.path.join(cur, 'config.ini'), encoding='utf-8')



class Predict:
    """Run NER inference over a test file with a fine-tuned model.

    All paths and hyper-parameters come from the module-level ``config``
    parser (config.ini). Calling the instance builds the model, loads the
    checkpoint, tags every line of the test file and writes the results
    to a CSV file.
    """

    def __init__(self):
        # Paths/settings below come from config.ini; edit that file to
        # change which text gets predicted.
        self.model_name = config.get('hugging_model', 'model_name')
        self.test_dataset_path = config.get('datasets', 'test_dataset_path')
        self.output_model_path = config.get('result', 'output_model_path')
        self.batch_size = config.getint('implementation', 'batch_size')
        self.fine_tuning = config.getboolean('implementation', 'fine_tuning')
        self.output_result_path = config.get('result', 'output_result_path')

    def __call__(self, *args, **kwargs):
        """Build the model, load the checkpoint and run :meth:`predict`."""
        self.device = torch.device('cuda:0')
        # NOTE(review): torch.load unpickles arbitrary objects by default;
        # only load checkpoints from trusted sources.
        self.checkpoint = torch.load(self.output_model_path, map_location=self.device)
        self.model = build_model.AssembleModel(model_name=self.model_name,
                                               labels_num='', fine_tuning=self.fine_tuning).to(self.device)
        # NOTE(review): if the checkpoint was saved from a wrapped model
        # (e.g. DataParallel), its keys may need their leading prefix
        # stripped before load_state_dict succeeds.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model.load_state_dict(self.checkpoint)
        print('---------Initial model Done.---------')
        self.predict()

        return

    def get_entities(self, sentence: List[str], tokens: List[int], beginids: List[int],
                     O_ids: int, limit: int = 1) -> Tuple[List[str], List[List[int]], List[List[int]]]:
        """Extract entity spans from a per-character tag sequence.

        An entity starts at a position whose tag id is in ``beginids`` and
        extends until the first "outside" tag (``O_ids``).

        Args:
            sentence: characters of the input sentence.
            tokens: predicted tag id for each character (parallel to
                ``sentence``).
            beginids: tag ids that mark the beginning of an entity.
            O_ids: tag id of the non-entity ("O") label.
            limit: entities whose text length is <= ``limit`` are discarded.

        Returns:
            Three parallel lists: entity strings, their tag-id sequences,
            and their character indices within ``sentence``.
        """
        entities_c = []
        entities_i = []
        entities_idx = []
        assert len(sentence) == len(tokens)
        lens = len(sentence)
        i = 0
        while i < lens:
            if tokens[i] in beginids:
                entity_c = []
                entity_i = []
                entity_idx = []
                # Consume characters until the next "O" tag (or end of input).
                while i < lens and tokens[i] != O_ids:
                    entity_c.append(sentence[i])
                    entity_i.append(tokens[i])
                    entity_idx.append(i)
                    i += 1
                # Drop entities that are too short to be meaningful.
                if len(''.join(entity_c)) > limit:
                    entities_c.append(''.join(entity_c))
                    entities_i.append(entity_i)
                    entities_idx.append(entity_idx)
            else:
                i += 1
        return entities_c, entities_i, entities_idx

    def predict(self):
        """Tag every line of the test file and save predictions as CSV."""
        self.model.eval()
        result = {'text': [], 'tag': []}
        cnt = 0

        # Read the test file; the first line is assumed to be a header and
        # is skipped. Encoding is pinned because the platform default may
        # not be UTF-8.
        with open(self.test_dataset_path, 'r', encoding='utf-8') as f:
            f.readline()
            td = tqdm([line.strip() for line in f])

        with torch.no_grad():
            for sentence in td:
                result['text'].append(sentence)
                sentence = list(sentence)
                token_ids = self.tokenizer.convert_tokens_to_ids(sentence)
                token_ids = torch.tensor([token_ids], dtype=torch.long).to(self.device)
                # Positions with id 0 (padding) are masked out.
                mask = (token_ids > 0)
                _, y_hat = self.model(sentence=token_ids, tags=None, mask=mask)
                # Tag id 1 marks entity beginnings and 3 is the "O" label —
                # presumably matches the training label map; TODO confirm.
                words, _, _ = self.get_entities(sentence=sentence, tokens=y_hat[0],
                                                beginids=[1], O_ids=3)

                result['tag'].append(words)
                cnt += 1
                td.set_postfix(str='第{}条数据处理完成'.format(cnt))

        # Persist predictions.
        df = pd.DataFrame(result)
        df.to_csv(self.output_result_path, index=False)
        return


if __name__ == '__main__':
    # Fix random seeds for reproducibility, then run inference end-to-end.
    set_seed()
    Predict()()
