import csv
import io
import pickle
from typing import List, Tuple, Dict, Optional, TypedDict

import chardet
import tqdm
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer
from config import Config


class EncodingData:
    """One tokenized sample: raw text, BERT encodings, and its class label."""

    def __init__(self, text: str, input_ids, attention_mask, label):
        self.text = text
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.label = label

    def __getstate__(self):
        # Pickle a copy of the attribute dict so unpickling never shares state.
        return dict(self.__dict__)

    def __setstate__(self, state):
        self.__dict__.update(state)

    def to_dict(self) -> Dict:
        """Return the sample's attributes as a plain dictionary."""
        return {
            'text': self.text,
            'input_ids': self.input_ids,
            'attention_mask': self.attention_mask,
            'label': self.label,
        }


class PreprocessingCache(TypedDict):
    """Schema of the pickled preprocessing cache file.

    The three parameter fields are compared against the caller's arguments
    in text_preprocess() to decide whether the cached split can be reused.
    """
    train_ratio: float            # fraction of samples assigned to the train split
    random_state: Optional[int]   # seed that was passed to train_test_split
    max_length: int               # tokenizer pad/truncation length used
    train_data: List[EncodingData]
    test_data: List[EncodingData]


def get_raw_data() -> List[List[str]]:
    """Load every dataset file listed in Config, one list of texts per file.

    Each file is read as bytes, its encoding auto-detected with chardet
    (falling back to utf-8), then parsed as CSV; only the first column of
    each row is kept.

    Returns:
        A list with one entry per dataset file; each entry is the list of
        strings from that file's first CSV column.

    Raises:
        FileNotFoundError: if any configured dataset file is missing.
    """
    result = []

    for filename in tqdm.tqdm(Config.Path.dataset_filenames, desc='加载原始数据'):
        filepath = (Config.Path.dataset_dir / filename).absolute()

        # Fail fast if the dataset file is missing.
        if not filepath.exists():
            raise FileNotFoundError(f"文件{filepath}不存在!")

        # Auto-detect the file encoding.
        with open(filepath, 'rb') as f:
            content = f.read()
            encoding = chardet.detect(content)['encoding']
            # Fall back to utf-8 when detection fails.
            encoding = encoding if encoding is not None else 'utf-8'
            # Wrap the decoded text in a string stream for the csv reader.
            stream = io.StringIO(content.decode(encoding, errors='replace'))

            texts = []
            # Blank lines in the file produce empty rows from csv.reader,
            # which would raise IndexError on row[0] — skip them.
            for row in csv.reader(stream):
                if row:
                    texts.append(row[0])
            result.append(texts)
    return result


def texts_to_token(raw_data: List[List[str]],
                   tokenizer: BertTokenizer,
                   max_length: int) -> List[EncodingData]:
    """Tokenize every text in raw_data with the given BERT tokenizer.

    Args:
        raw_data: one list of texts per class; the outer index is used as
            the integer class label of every text in that inner list.
        tokenizer: tokenizer whose encode_plus yields 'input_ids' and
            'attention_mask' (lists, since return_tensors=None).
        max_length: every sequence is padded/truncated to this length.

    Returns:
        A flat list of EncodingData in the same order as raw_data.
    """
    result = []
    # Total sample count, used only to size the progress bar.
    total = sum(len(arr) for arr in raw_data)

    with tqdm.tqdm(total=total, mininterval=0.1, desc='tokenizer') as bar:
        for label, arr in enumerate(raw_data):
            for text in arr:
                encoding = tokenizer.encode_plus(
                    text,
                    add_special_tokens=True,
                    max_length=max_length,
                    return_token_type_ids=False,
                    padding='max_length',
                    truncation=True,
                    return_attention_mask=True,
                    return_tensors=None
                )
                result.append(EncodingData(
                    text=text,
                    input_ids=encoding['input_ids'],
                    attention_mask=encoding['attention_mask'],
                    label=label
                ))
                bar.update(1)
    return result


def text_preprocess(
        raw_data: Optional[List[List[str]]] = None,
        train_ratio: float = 0.8,
        random_state: Optional[int] = None,
        max_length: int = Config.Train.max_length,
        use_cache: bool = True) -> Tuple[List[EncodingData], List[EncodingData]]:
    """Tokenize the dataset and split it into stratified train/test sets.

    Args:
        raw_data: one list of texts per class; loaded with get_raw_data()
            when None.
        train_ratio: fraction of samples assigned to the training split.
        random_state: seed forwarded to train_test_split.
        max_length: tokenizer pad/truncation length.
        use_cache: when True, reuse the pickle cache if its parameters match;
            the cache file is (re)written after any fresh computation.

    Returns:
        (train_data, test_data) as lists of EncodingData.
    """
    # Try the cache first.
    if use_cache and Config.Path.dataset_cache.exists():
        with open(Config.Path.dataset_cache, 'rb') as f:
            # NOTE(security): pickle.load executes arbitrary code if the cache
            # file is tampered with — only load caches this program produced.
            cache: PreprocessingCache = pickle.load(f)
            # Reuse the cached split only when all three parameters match.
            cache_param_match = (
                cache['train_ratio'] == train_ratio and
                cache['random_state'] == random_state and
                cache['max_length'] == max_length
            )
            if cache_param_match:
                print(f"从缓存文件'{Config.Path.dataset_cache}'中加载数据")
                return cache['train_data'], cache['test_data']

    if raw_data is None:
        raw_data = get_raw_data()

    # Convert texts to token encodings.
    tokenizer = BertTokenizer.from_pretrained(Config.Train.model_name)
    result = texts_to_token(raw_data, tokenizer, max_length=max_length)
    # Stratified split; each EncodingData already carries its label, so read
    # it back directly instead of re-deriving it from raw_data.
    labels = [item.label for item in result]
    train_size = int(len(result) * train_ratio)
    train_data, test_data = train_test_split(
        result, train_size=train_size, random_state=random_state,
        stratify=labels, shuffle=True)

    # Persist the result for the next run.
    with open(Config.Path.dataset_cache, 'wb') as f:
        cache: PreprocessingCache = {
            'train_ratio': train_ratio,
            'random_state': random_state,
            'max_length': max_length,
            'train_data': train_data,
            'test_data': test_data
        }
        pickle.dump(cache, f)  # noqa
        print(f"预处理结果写入'{Config.Path.dataset_cache}'")

    return train_data, test_data


if __name__ == '__main__':
    # Smoke run: execute the full preprocessing pipeline with defaults,
    # report split sizes, and dump one training sample for inspection.
    train_data, test_data = text_preprocess()
    print(f'训练集数据量: {len(train_data)}')
    print(f'测试集数据量: {len(test_data)}')
    first_sample = train_data[0]
    print(first_sample.to_dict())
