# -*- coding: utf-8 -*-
# @Time    : 2023/6/3 11:36 下午
# @Author  : Wu WanJie

import pandas as pd
from tqdm import tqdm
import random
import jieba
import synonyms
import random
import os

from nlpcda import EquivalentChar, Randomword, Similarword, Homophone, RandomDeleteChar, CharPositionExchange

# Fixed seed so augmentation runs are reproducible across executions.
random.seed(2019)

# Root directory holding the stop-word table and the competition CSVs.
DATA_DIR = "/note/nlp_algo/app/data/"

# Multi-label classification targets (aspect labels for appliance reviews);
# each is a 0/1 column in the training CSV.
LABELS = ['是否有故障', '性价比', '外形外观', '模块分区', '能耗', '散热能力', '赠送礼品', '送货安装服务', '整体尺寸', '保鲜效果', '活动效果', '正品', '外观材质', '是否结霜', '消费者体验', '发货和物流', '价格意见', '空间布局', '便捷性', '产品功能', '容量大小', '服务质量', '产品描述', '制冷效果', '客服服务', '智能化', '售后服务', '软冷冻', '净味功能', '风格设计', '噪音大小', '向他人推荐', '产品质量', '箱门密封性', '忠诚度', '是否包装完好']


class CustomNlpCda(object):
    """Text augmentation via the nlpcda toolkit.

    Builds six character/word-level augmenters once and applies all of
    them to each input sentence.
    """

    def __init__(self, create_num=3, change_rate=0.3):
        """Instantiate every augmenter with a shared configuration.

        create_num: number of candidate sentences each augmenter yields.
        change_rate: per-position probability of applying the change.
        """
        # Style label -> configured augmenter, applied in insertion order.
        self.func_lst = {
            # 1. random (equivalent) entity replacement
            "等价实体替换": Randomword(create_num=create_num, change_rate=change_rate),
            # 2. random synonym replacement
            "同义词替换": Similarword(create_num=create_num, change_rate=change_rate),
            # 3. random homophone replacement
            "近义字替换": Homophone(create_num=create_num, change_rate=change_rate),
            # 4. random character deletion
            "字删除": RandomDeleteChar(create_num=create_num, change_rate=change_rate),
            # 5. random swap of neighbouring characters
            "置换邻近的字": CharPositionExchange(create_num=create_num, change_rate=change_rate, char_gram=3, seed=1),
            # 6. equivalent character replacement
            "等价字替换": EquivalentChar(create_num=create_num, change_rate=change_rate),
        }

    def create_sentence(self, sentence):
        """Run every augmenter on *sentence*.

        Returns a list of {"style": ..., "text": ...} dicts, skipping
        outputs identical to the input; the untouched original is always
        appended last with style "原始文本".
        """
        results = []
        for style_name, augmenter in self.func_lst.items():
            results.extend(
                {"style": style_name, "text": generated}
                for generated in augmenter.replace(sentence)
                if generated != sentence
            )
        results.append({"style": "原始文本", "text": sentence})
        return results


class CustomEda(object):
    """EDA (Easy Data Augmentation) for Chinese text.

    Implements the four EDA operations — synonym replacement, random
    insertion, random swap, random deletion — on a jieba-segmented
    sentence and returns up to ``num_aug`` augmented variants.
    """

    def __init__(self, data_dir, alpha_sr=0.1, alpha_ri=0.1, alpha_rs=0.1, p_rd=0.1, num_aug=9):
        """
        data_dir: root directory containing stopwords/hit_stopwords.txt.
        alpha_sr / alpha_ri / alpha_rs: fraction of words touched by
            synonym replacement / random insertion / random swap.
        p_rd: per-word deletion probability for random deletion.
        num_aug: number of augmented sentences to keep per input.
        """
        self.alpha_sr = alpha_sr
        self.alpha_ri = alpha_ri
        self.alpha_rs = alpha_rs
        self.p_rd = p_rd
        self.num_aug = num_aug
        self.stop_words = self.load_stop_words(data_dir)

    @staticmethod
    def load_stop_words(data_dir):
        """Load the stop-word list (HIT stop-word table by default).

        Fixes vs. original: the file is closed via a context manager
        (the handle previously leaked), and the trailing newline is
        stripped explicitly instead of chopping the last character
        (``[:-1]`` corrupted a final line with no trailing newline).
        """
        path = os.path.join(data_dir, "stopwords/hit_stopwords.txt")
        with open(path, encoding="utf-8") as stop_f:
            return [line.rstrip("\n") for line in stop_f]

    def synonym_replacement(self, words, n):
        """Synonym Replacement (SR).

        Randomly pick up to *n* non-stop-words and replace every
        occurrence of each with a randomly chosen synonym (from the
        ``synonyms`` package). Returns a new word list.
        """
        new_words = words.copy()
        random_word_list = list(set(word for word in words if word not in self.stop_words))
        random.shuffle(random_word_list)
        num_replaced = 0
        for random_word in random_word_list:
            candidates = synonyms.nearby(random_word)[0]
            if candidates:
                synonym = random.choice(candidates)
                new_words = [synonym if word == random_word else word for word in new_words]
                num_replaced += 1
            if num_replaced >= n:
                break
        # Round-trip through a string, keeping parity with the reference
        # EDA implementation (splits multi-token synonyms apart).
        return ' '.join(new_words).split(' ')

    @staticmethod
    def random_insertion(words, n):
        """Random Insertion (RI).

        *n* times: pick a random word that has synonyms and insert one of
        its synonyms at a random position. Returns None when no word with
        synonyms is found within 10 draws (callers treat a falsy result
        as "skip this augmentation").
        """
        new_words = words.copy()
        for _ in range(n):
            candidates = []
            attempts = 0
            while not candidates:
                random_word = random.choice(new_words)
                candidates = synonyms.nearby(random_word)[0]
                attempts += 1
                if attempts >= 10:
                    return None
            random_idx = random.randint(0, len(new_words) - 1)
            new_words.insert(random_idx, random.choice(candidates))
        return new_words

    @staticmethod
    def random_swap(words, n):
        """Random Swap (RS): swap two randomly chosen positions, *n* times.

        Bug fix: the original returned unconditionally inside the loop
        after the first swap, so ``n`` was ignored; the loop now performs
        all n swaps as documented.
        """
        new_words = words.copy()
        for _ in range(n):
            idx_1 = random.randint(0, len(new_words) - 1)
            idx_2 = idx_1
            attempts = 0
            while idx_2 == idx_1:
                idx_2 = random.randint(0, len(new_words) - 1)
                attempts += 1
                if attempts > 3:
                    # Degenerate input (e.g. a single word): give up early.
                    return new_words
            new_words[idx_1], new_words[idx_2] = new_words[idx_2], new_words[idx_1]
        return new_words

    @staticmethod
    def random_deletion(words, p):
        """Random Deletion (RD): drop each word independently with
        probability *p*. Never returns an empty list — when everything
        is deleted, one randomly chosen word is kept.
        """
        if len(words) == 1:
            # A single-word sentence is returned untouched.
            return words
        new_words = [word for word in words if random.uniform(0, 1) > p]
        if not new_words:
            return [random.choice(words)]
        return new_words

    def create_sentence(self, sentence):
        """Apply all four EDA operations to *sentence*.

        Returns a list of {"text": ..., "style": ...} dicts with at most
        ``num_aug`` entries; augmentations that fail or reproduce the
        input verbatim are skipped.
        """
        words = " ".join(jieba.cut(sentence)).split()
        num_words = len(words)

        augmented_sentences = []
        num_new_per_technique = int(self.num_aug / 4) + 1
        n_sr = max(1, int(self.alpha_sr * num_words))
        n_ri = max(1, int(self.alpha_ri * num_words))
        n_rs = max(1, int(self.alpha_rs * num_words))

        # (generator, style label) for each EDA operation, applied in the
        # canonical SR -> RI -> RS -> RD order.
        techniques = [
            (lambda: self.synonym_replacement(words, n_sr), "EDA_同义词替换"),
            (lambda: self.random_insertion(words, n_ri), "EDA_随机插入"),
            (lambda: self.random_swap(words, n_rs), "EDA_随机交换"),
            (lambda: self.random_deletion(words, self.p_rd), "EDA_随机删除"),
        ]
        for generate, style in techniques:
            for _ in range(num_new_per_technique):
                a_words = generate()
                # Skip failed generations (None) and no-op augmentations.
                if not a_words or "".join(a_words) == sentence:
                    continue
                augmented_sentences.append({"text": "".join(a_words), "style": style})

        random.shuffle(augmented_sentences)

        if self.num_aug >= 1:
            augmented_sentences = augmented_sentences[:self.num_aug]
        elif augmented_sentences:
            # Fractional num_aug: keep each candidate with probability
            # num_aug / len. Guarded against an empty list (the original
            # divided by zero here when nothing was generated).
            keep_prob = self.num_aug / len(augmented_sentences)
            augmented_sentences = [s for s in augmented_sentences if random.uniform(0, 1) < keep_prob]

        return augmented_sentences


def merge_candidate(*args):
    """Flatten several candidate lists into one.

    Keeps only the first occurrence of each distinct "text" value,
    preserving the order in which candidates are encountered.
    """
    merged = []
    seen_texts = set()
    for candidates in args:
        for item in candidates:
            text = item["text"]
            if text not in seen_texts:
                seen_texts.add(text)
                merged.append(item)
    return merged


if __name__ == "__main__":
    df = pd.read_csv(os.path.join(DATA_DIR, "competition/train_data.csv"))
    func_1 = CustomNlpCda()
    func_2 = CustomEda(DATA_DIR)
    new_tas = {label: list() for label in LABELS}
    new_tas["text"] = []
    new_tas["style"] = []
    new_tas["data_id"] = []
    count = 0
    for row in tqdm(df.itertuples(), total=len(df)):
        text = getattr(row, "text").replace(" ", "")
        candidates_lst_1 = func_1.create_sentence(text)
        candidates_lst_2 = func_2.create_sentence(text)
        append_lst = []
        candidate_lst = merge_candidate(candidates_lst_1, candidates_lst_2)
        candidate_lst.append({"text": text, "style": "origin"})
        for candidate in candidate_lst:
            [new_tas[label].append(int(getattr(row, label))) for label in LABELS]
            new_tas["text"].append(candidate["text"])
            new_tas["style"].append(candidate["style"])
            new_tas["data_id"].append(count)

        count += 1

    result1 = dict()
    for aa in LABELS:
        result1[aa] = sum(new_tas[aa])
    print(result1)
    df1 = pd.DataFrame(new_tas)
    df1.to_csv("/note/nlp_algo/app/data/competition/train_more.csv", index=False)
