#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: Gaoxiang

import jieba
import torch
import os
import dill
import pandas as pd
from torchtext.data import Field
from torchtext import data
from text_cls_config import Config


def datafield_init(config):
    """Build the (TEXT, LABEL) torchtext Fields from the config.

    The tokenizer is selected by config["tokenizer"]: "jieba" uses the
    stopword-filtering jieba tokenizer, anything else falls back to
    character-level splitting.  The original duplicated the whole Field
    construction in both branches; only the tokenize callable differs.
    """
    if config["tokenizer"] == "jieba":
        tokenize = jieba_tokenize
    else:
        tokenize = split_tokenize
    TEXT = Field(sequential=True,
                 tokenize=tokenize,
                 batch_first=True,
                 fix_length=config["seq_length"])
    LABEL = Field(sequential=False)

    return (TEXT, LABEL)


def dataloader_init(config):
    """Create the datafield pair; reuse a persisted vocab unless a new one
    is going to be built (config["to_build_new_vocab"])."""
    datafield = datafield_init(config)
    if config["to_build_new_vocab"]:
        return datafield
    load_vocab(config, datafield)
    update_vocab_config(config, datafield)
    return datafield


def load_vocab(config, datafield):  # in-place modification of the Fields
    """Attach previously pickled vocabs to the TEXT/LABEL fields in place.

    Raises FileNotFoundError if either vocab file is missing.  The original
    used `assert` for this check, which is silently stripped when Python
    runs with -O; an explicit exception is always active and names the
    missing path.
    """
    TEXT, LABEL = datafield
    for path in (config["text_vocab_path"], config["label_vocab_path"]):
        if not os.path.exists(path):
            raise FileNotFoundError("vocab file not found: " + path)
    with open(config["text_vocab_path"], 'rb') as f:
        TEXT.vocab = dill.load(f)
    with open(config["label_vocab_path"], 'rb') as f:
        LABEL.vocab = dill.load(f)


def build_vocab(config, datafield, dataset):
    """Build TEXT/LABEL vocabs from `dataset` and persist each with dill."""
    TEXT, LABEL = datafield
    # Same build-then-dump sequence for both fields, driven by a table.
    for field, path_key in ((TEXT, "text_vocab_path"),
                            (LABEL, "label_vocab_path")):
        field.build_vocab(dataset)
        with open(config[path_key], 'wb') as f:
            dill.dump(field.vocab, f)


def update_vocab_config(config, datafield):
    """Sync vocab-derived settings into the config dict in place.

    Writes vocab_size, num_class and label_map (index -> label string,
    inverted from the label vocab's stoi mapping).
    """
    text_field, label_field = datafield

    config["vocab_size"] = len(text_field.vocab)
    config["num_class"] = len(label_field.vocab)
    inverse_map = {}
    for token, index in label_field.vocab.stoi.items():
        inverse_map[index] = token
    config["label_map"] = inverse_map


# Tokenizer definitions
def jieba_tokenize(text):
    """Segment Chinese text with jieba, dropping stopwords.

    The stopword list is loaded once and cached on the function object as
    a set: the original re-read the stopword file from disk on every call
    (i.e. once per tokenized sentence) and did O(n) list membership tests.
    """
    stopwords = getattr(jieba_tokenize, "_stopwords_cache", None)
    if stopwords is None:
        # Set gives O(1) membership checks inside the comprehension below.
        stopwords = set(stopwords_list(Config["stopwords_list_path"]))
        jieba_tokenize._stopwords_cache = stopwords
    return [word for word in jieba.cut(text) if word.strip() not in stopwords]


# Character-level split for Chinese; no stopword filtering
def split_tokenize(text):
    """Tokenize by splitting the text into its individual characters."""
    return list(text)


# Load the stopword lexicon
def stopwords_list(filepath):
    """Read a UTF-8 stopword file (one word per line) and return the
    stripped lines as a list.

    Uses a context manager so the file handle is closed deterministically;
    the original opened the file without `with` and leaked the handle.
    Iterating the file directly also avoids materializing readlines().
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]


def regular_modify(data, config):
    """Overwrite the flag column in place using keyword-regex rules.

    Rules are applied in order, so a row matching several patterns keeps
    the label of the LAST matching rule.
    """
    content_col = config["content_col"]
    flag_col = config["flag_col"]
    rules = (
        ("投诉|举报|报警|误导", 3),
        ("押金", 0),
        ("我不是|不叫|错|骚扰|乱发|别发|别再发|不要发|不要再发|不要再给|打扰|还发|误发|老发|换号|换手机号|没买保险|谁是|不叫|不姓|我姓|名字是|号码换人|手机换人|不是我的|我是女的|我是男的|烦我|搞清楚", 1),
    )
    for pattern, label in rules:
        matched = data[content_col].str.contains(pattern, case=False, na=False)
        data.loc[matched, flag_col] = label


def load_raw_data(raw_data):
    """Read the file at path `raw_data` into a DataFrame.

    Dispatches on the extension: .xlsx goes through read_excel, anything
    else is assumed to be CSV.
    """
    reader = pd.read_excel if raw_data.endswith(".xlsx") else pd.read_csv
    return reader(raw_data)


def create_examples(raw_data, is_test_set, fields, config):
    """Build a list of torchtext Examples from the DataFrame.

    Training rows carry (content, label); test rows carry (content, None).
    The two near-identical branch bodies of the original (astype + loop)
    are merged into a single loop with the label handled conditionally.
    Mutates `raw_data` in place (astype on the relevant columns), matching
    the original behavior.
    """
    content_col = config["content_col"]
    raw_data[content_col] = raw_data[content_col].astype(str)
    if not is_test_set:
        raw_data[config["flag_col"]] = raw_data[config["flag_col"]].astype(str)

    examples = []
    for row in raw_data.itertuples():
        content = getattr(row, content_col)
        label = None if is_test_set else getattr(row, config["flag_col"])
        examples.append(data.Example.fromlist([content, label], fields))
    return examples


class MsgDataloader(data.Dataset):
    """torchtext Dataset built from a raw .xlsx/.csv message file.

    Loads the file, optionally applies the keyword-regex relabeling
    (training data with config["use_regular"] set), and converts the rows
    into Examples.
    """

    def __init__(self, data_path, text_field, label_field=None, is_test_set=False, config=None):
        raw_data = load_raw_data(data_path)

        # Regex relabeling only applies to labeled (non-test) data.
        if (not is_test_set) and config["use_regular"]:
            regular_modify(raw_data, config)

        fields = [("text", text_field), ("label", label_field)]
        examples = create_examples(raw_data, is_test_set, fields, config)
        super().__init__(examples, fields)


class DataIterator:
    """Wraps dataset construction and BucketIterator creation for one split.

    On construction it builds the MsgDataloader dataset and, when
    config["to_build_new_vocab"] is set, builds and persists fresh vocabs
    (then clears the flag so later splits reuse them).
    """

    def __init__(self, raw_set, is_test_set, use_cuda, batchsize, datafield, config):
        self.config = config
        self.raw_set = raw_set
        self.is_test_set = is_test_set
        # Only training/validation data is shuffled.
        self.shuffle = not is_test_set
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.batchsize = batchsize
        self.datafield = datafield

        self.build_dataset()
        # Build a new vocab if requested.
        if self.config["to_build_new_vocab"]:
            self.build_new_vocab()

    def build_dataset(self):
        TEXT, LABEL = self.datafield
        if self.is_test_set:
            # Test data has no labels, so drop the label field.
            LABEL = None

        self.dataset = MsgDataloader(self.raw_set,
                                     text_field=TEXT,  # pass the Fields through
                                     label_field=LABEL,
                                     is_test_set=self.is_test_set,
                                     config=self.config)

    def build_new_vocab(self):
        build_vocab(self.config, self.datafield, dataset=self.dataset)
        update_vocab_config(self.config, self.datafield)
        self.config["to_build_new_vocab"] = False

    def get_tier_data(self):
        """Return a BucketIterator over the dataset."""
        return data.BucketIterator(dataset=self.dataset,
                                   batch_size=self.batchsize,
                                   shuffle=self.shuffle,
                                   sort_key=lambda example: len(example.text),
                                   device=self.device,
                                   sort_within_batch=False,
                                   repeat=False)


if __name__ == "__main__":
    # Smoke test: build the train iterator and print one batch.
    datafield = dataloader_init(config=Config)
    train_data = DataIterator(raw_set=Config["train_data_path"],
                              is_test_set=False,
                              use_cuda=Config["use_cuda"],
                              batchsize=Config["batch_size"],
                              datafield=datafield,
                              config=Config)
    TEXT, LABEL = datafield
    print(TEXT.vocab.stoi.items())
    # NOTE: loop variables renamed -- the original bound `data`, shadowing
    # the `torchtext.data` module imported at the top of the file.
    for batch_idx, (batch_text, batch_target) in enumerate(train_data.get_tier_data()):
        print(batch_idx, (batch_text, batch_target))
        break

    # Config["to_build_new_vocab"] = False
    # datafield = dataloader_init(config=Config)
    #
    # valid_data = DataIterator(raw_set=Config["valid_data_path"],
    #                           is_test_set=True,
    #                           use_cuda=Config["use_cuda"],
    #                           batchsize=Config["batch_size"],
    #                           datafield=datafield,
    #                           config=Config)
    #
    # for batch_idx, (batch_text, batch_target) in enumerate(valid_data.get_tier_data()):
    #     print(batch_idx, (batch_text, batch_target))
    #     break
