import re  # 用于正则表达式操作
import pandas as pd  # 用于数据处理，读取CSV文件
import numpy as np  # 用于数值计算
import jieba  # 用于中文分词
import warnings  # 用于警告处理,在很多情况下是为了让输出更简洁
warnings.filterwarnings('ignore')  # 不显示警告
from sklearn.feature_extraction.text import CountVectorizer  # 用于文本特征提取
from sklearn.utils import resample
import torch
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer

def text_predeal(temp):
    """Clean a raw comment string.

    Strips HTML tags, keeps only Chinese characters, ASCII letters, digits
    and a few full-width punctuation marks, removes the word '网站', and
    trims surrounding whitespace.
    """
    # Remove HTML tags FIRST: the character filter below turns '<' and '>'
    # into spaces, so in the original order the tag regex could never match
    # (dead code).
    temp = re.sub(r'<.*?>', ' ', temp)
    # Keep Chinese (U+4E00-U+9FA5), ASCII letters, digits and common CJK
    # punctuation; everything else becomes a space.
    # (Fixed: the original class contained a stray 'a' before 'A-Za-z'.)
    temp = re.sub('[^\u4e00-\u9fa5A-Za-z0-9，。？：！；“”]', ' ', temp)
    temp = temp.replace('网站', '')  # drop a domain-specific noise word
    return temp.strip()


def jiebafenci(sentences, step = False):
    """Tokenize a Chinese sentence with jieba.

    Returns the tokens joined by single spaces (the format expected by
    CountVectorizer), or concatenated with no separator when ``step`` is
    truthy.
    """
    separator = "" if step else " "
    return separator.join(jieba.cut(sentences))


def init_data():
    """Load the train/test CSVs, balance the classes and vectorize comments.

    Returns:
        (train_matrix, train_labels, test_matrix, test_dataframe, vocab_size)

    NOTE(review): data paths are hard-coded to /data/sfq/oco — consider
    moving them to a config object.
    """
    train_data = pd.read_csv('/data/sfq/oco/train.csv', sep='\t')
    test_data = pd.read_csv('/data/sfq/oco/test_new.csv', sep=',')
    # Normalize the raw text (strip HTML/garbage characters).
    train_data['comment'] = train_data['comment'].apply(text_predeal)
    test_data['comment'] = test_data['comment'].apply(text_predeal)
    # Load the stop-word list, one word per line, skipping blanks.
    with open('/data/sfq/oco/stopwords.txt', 'r', encoding='utf-8') as f:
        stop_words = [line.strip() for line in f if line.strip()]
    # Segment each comment into space-separated tokens for the vectorizer.
    train_temp = [jiebafenci(text) for text in train_data['comment'].values]
    test_temp = [jiebafenci(text) for text in test_data['comment'].values]
    # Bag-of-words features over the combined train+test vocabulary.
    vector = CountVectorizer(min_df=10, ngram_range=(1, 1),
                             token_pattern=r'\b\w+\b', stop_words=stop_words)
    vector.fit(train_temp + test_temp)
    matrix_test = vector.transform(test_temp).toarray()

    # Class re-balancing: keep every minority (label == 1) sample and
    # DOWN-sample the majority (label == 0) class, without replacement, to
    # twice the minority size.  (The original comments claimed this was
    # minority over-sampling with replacement — the code actually
    # under-samples the majority class.)
    minority = train_data[train_data['label'] == 1]
    majority = train_data[train_data['label'] == 0]
    majority_downsampled = resample(
        majority,
        replace=False,                 # sample without replacement
        n_samples=len(minority) * 2,   # target 2:1 majority-to-minority ratio
        random_state=123               # reproducible draw
    )

    # Combine and shuffle the balanced training set.
    balanced = pd.concat([minority, majority_downsampled])
    balanced = balanced.sample(frac=1.0, random_state=123).reset_index(drop=True)
    # Re-segment and vectorize the balanced training data.
    balanced_temp = [jiebafenci(text) for text in balanced['comment'].values]
    matrix_train = vector.transform(balanced_temp).toarray()
    label_train = np.array(balanced['label'].tolist())

    return matrix_train, label_train, matrix_test, test_data, len(vector.vocabulary_)


PAD, CLS, SEP = '[PAD]', '[CLS]', '[SEP]'  # BERT special tokens: padding, sequence-level summary ([CLS]) and separator

def build_dataset(config):
    """Build BERT-encoded train and test datasets.

    Reads the train/test CSVs from config.train_path / config.test_path,
    cleans the text, wraps known keywords in a custom [SPE] special token,
    and encodes everything with a BertTokenizer to fixed length
    config.pad_size.

    Returns:
        train_data: list of (token_ids, label, seq_len, mask) tuples
                    (train split + validation split concatenated; folds
                    are carved out later by get_fold).
        test_iter:  DatasetIterater over the test set (labels are -1).
        test_data:  the raw test DataFrame (for writing predictions).
    """
    train_data = pd.read_csv(config.train_path, sep='\t')
    test_data = pd.read_csv(config.test_path, sep=',')
    train_data['comment'] = train_data['comment'].apply(text_predeal)
    test_data['comment'] = test_data['comment'].apply(text_predeal)

    # Keyword file: one "keyword,weight" pair per line; weights are unused.
    keywords = []
    with open('/data/sfq/oco/keywords.txt', 'r', encoding='utf-8') as f:
        for line in f:
            keyword, _weight = line.strip().split(',')
            keywords.append(keyword)

    tokenizer = BertTokenizer.from_pretrained(config.bert_path)
    tokenizer.add_special_tokens({'additional_special_tokens': ['[SPE]']})
    config.tokenizer = tokenizer

    X_train, X_val, y_train, y_val = train_test_split(
        train_data['comment'].values, train_data['label'],
        test_size=0.2, random_state=42)

    def insert_special_token(text, keywords):
        # Wrap every keyword occurrence in [SPE] markers.  re.escape guards
        # against regex metacharacters in the keyword list (the original
        # interpolated keywords unescaped), and substituting the matched
        # text keeps the case-insensitive search consistent with the
        # replacement (the original searched IGNORECASE but replaced
        # case-sensitively).
        for keyword in keywords:
            pattern = r'\b' + re.escape(keyword) + r'\b'
            text = re.sub(pattern, lambda m: f'[SPE]{m.group(0)}[SPE]',
                          text, flags=re.IGNORECASE)
        return text

    def load_dataset(datas, labels, pad_size=128, is_test=False):
        # Encode each text to exactly pad_size ids.  truncation=True keeps
        # long comments from producing ragged sequences longer than
        # pad_size (the deprecated pad_to_max_length flag did not
        # truncate, which would break LongTensor batching downstream).
        contents = []
        for text, label in zip(datas, labels):
            content = insert_special_token(text, keywords)
            encoded = tokenizer.encode_plus(
                content,
                add_special_tokens=True,
                max_length=pad_size,
                padding='max_length',
                truncation=True,
                return_attention_mask=True,
                return_tensors='pt',
            )
            token_ids = encoded['input_ids'][0].tolist()
            mask = encoded['attention_mask'][0].tolist()
            seq_len = len(token_ids)  # always pad_size after padding
            # Test rows carry no usable label; store -1 as a sentinel.
            contents.append(
                (token_ids, int(label) if not is_test else -1, seq_len, mask))
        return contents

    train = load_dataset(X_train, y_train.values, config.pad_size)
    dev = load_dataset(X_val, y_val.values, config.pad_size)
    test = load_dataset(test_data['comment'].values, test_data['id'],
                        config.pad_size, True)
    train_data = train + dev
    test_iter = build_iterator(test, config)
    return train_data, test_iter, test_data

class DatasetIterater(object):
    """Batching iterator over (token_ids, label, seq_len, mask) tuples.

    Yields ((x, seq_len, mask), y) with every element converted to a
    LongTensor on ``device``.  When the dataset size is not a multiple of
    batch_size, a final shorter batch is emitted.
    """

    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # Bug fix: the remainder must be taken modulo batch_size, not
        # n_batches.  The original `len(batches) % self.n_batches` silently
        # dropped the tail batch whenever len % n_batches happened to be 0,
        # and raised ZeroDivisionError when len(batches) < batch_size.
        self.residue = len(batches) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        x = torch.LongTensor([item[0] for item in datas]).to(self.device)
        y = torch.LongTensor([item[1] for item in datas]).to(self.device)
        # Pre-padding sequence length (capped at pad_size upstream).
        seq_len = torch.LongTensor([item[2] for item in datas]).to(self.device)
        mask = torch.LongTensor([item[3] for item in datas]).to(self.device)
        return (x, seq_len, mask), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final, shorter batch with the leftover samples.
            batch = self.batches[self.index * self.batch_size:]
            self.index += 1
            return self._to_tensor(batch)
        elif self.index >= self.n_batches:
            self.index = 0  # reset so the iterator is reusable next epoch
            raise StopIteration
        else:
            start = self.index * self.batch_size
            batch = self.batches[start: start + self.batch_size]
            self.index += 1
            return self._to_tensor(batch)

    def __iter__(self):
        return self

    def __len__(self):
        return self.n_batches + 1 if self.residue else self.n_batches


def build_iterator(dataset, config):
    """Wrap `dataset` in a DatasetIterater using config.batch_size/device."""
    # Return directly instead of binding to a local named `iter`, which
    # shadowed the built-in.
    return DatasetIterater(dataset, config.batch_size, config.device)


def get_fold(config, train_data, idx):
    """Return (train_iter, dev_iter) for fold `idx` of config.fold folds.

    `train_data` must be a list of sample tuples: fold `idx`'s slice
    becomes the dev set and the remainder (including any tail samples
    beyond fold * slide) stays in the training set.
    """
    # Floor division instead of the C-style (int)(...) cast; the redundant
    # `train_data = train_data` self-assignment is gone.
    slide = len(train_data) // config.fold  # samples per fold
    dev_data = train_data[idx * slide: (idx + 1) * slide]
    fold_train = train_data[:idx * slide] + train_data[(idx + 1) * slide:]
    train_iter = build_iterator(fold_train, config)
    dev_iter = build_iterator(dev_data, config)
    return train_iter, dev_iter