import transformers
from transformers import GPT2Tokenizer, GPT2Model
import numpy as np
import torch
import torch.nn.functional as F
import re
import os
import argparse
import pickle
import stanza
from tqdm import tqdm

# Paths used by the script
model_dir = r'E:\TJU\Grade3\NLP\Project\cheatGPT-ryan\models'
load_dir = r'..\results\generate'
save_dir = r'..\results\perturb'

BUFFER_SIZE = 1                             # unmasked gap (in tokens) kept around each masked span
MASK_PATTERN = re.compile(r"<extra_id_\d+>")  # T5/mT5 sentinel-token pattern used to split fills
MASK_STRING = '<<<mask>>>'                  # Model-dependent



def perturb_save():
    """Load texts, perturb each one ``args.num_perturbations`` times via
    mask-filling, and pickle the originals together with their perturbed
    samples.

    Reads all configuration from the module-level ``args`` namespace and
    writes results with :func:`save_pkl` under ``save_dir``.
    """
    # Resolve input/output directories; human-written and LLM-generated
    # texts live in different sub-folders.
    if args.source_model_name == "human":
        load_path = os.path.join(load_dir, args.dataset, "human")
        save_path = os.path.join(save_dir, args.dataset, "human")
    else:
        load_path = os.path.join(load_dir, args.dataset, "LLMs", args.source_model_name)
        save_path = os.path.join(save_dir, args.dataset, "LLMs", args.source_model_name)

    files = os.listdir(load_path)

    # Read the requested slice of files; `with` guarantees the handles are
    # closed even if reading fails (the original leaked handles on error).
    texts = []
    for text_file in files[args.checkpoint:args.n_samples]:
        with open(os.path.join(load_path, text_file), "r", encoding="utf-8") as f:
            texts.append(f.read())

    if args.dataset == "chinese":
        # Keep only the first 300 characters, then word-segment with Stanza
        # so downstream code can treat the text as space-delimited tokens.
        texts = [stanza_segment(text[:300]) for text in texts]

    # Split every text into fixed-size segments: List[List[str]].
    seg_texts = segment(texts)

    for idx, seg_text in enumerate(tqdm(seg_texts, desc="Processing segments")):
        # Perturb the segmented text num_perturbations times. Result shape:
        # (num_perturbations, num_segments) — one inner list per perturbation.
        perturb_sample = [perturb(seg_text) for _ in range(args.num_perturbations)]

        # Persist original + perturbed samples, offset by the checkpoint so
        # restarted runs do not overwrite earlier output files.
        save_pkl(save_path, seg_text, perturb_sample, idx + args.checkpoint)

def segment(texts, segment_length=None):
    """Split each text into roughly equal, space-joined segments.

    Args:
        texts: List[str] of space-tokenized input texts.
        segment_length: target tokens per segment; defaults to
            ``args.segment_length`` (backward compatible with old callers).

    Returns:
        List[List[str]]: for each text, its list of segment strings, each at
        most ceil(len/n_segments) tokens long.
    """
    if segment_length is None:
        segment_length = args.segment_length
    # Guard empty input: np.min() below raises on empty arrays.
    if not texts:
        return []

    tokenized_texts = [text.split(' ') for text in texts]

    # Tokens per text.
    text_lengths = np.array([len(tokenized_text) for tokenized_text in tokenized_texts])
    # Segments per text; clamp to >= 1 so a text shorter than segment_length
    # becomes a single segment instead of causing a division by zero below.
    n_segments = np.maximum(np.floor(text_lengths / segment_length).astype(int), 1)
    print(f"segment_length: {segment_length}")
    print(f"min_n_segments {np.min(n_segments)}")
    print(f"min_text_length: {np.min(text_lengths)}")

    # Per-text segment length, rounded up so the segments cover every token.
    segment_lengths = np.ceil(text_lengths / n_segments).astype(int)
    segmented_texts = []
    for idx, tokenized_text in enumerate(tokenized_texts):
        text_segments = []
        seg_len = segment_lengths[idx]
        for segment_idx in range(n_segments[idx]):
            text_segment = tokenized_text[segment_idx * seg_len:(segment_idx + 1) * seg_len]
            text_segments.append(" ".join(text_segment))

        segmented_texts.append(text_segments)

    return segmented_texts

def stanza_segment(text):
    """Word-segment Chinese text with Stanza.

    Args:
        text: str, the input text.

    Returns:
        str: the text with word tokens separated by single spaces.
    """
    # Building a stanza.Pipeline is expensive (it loads models from disk),
    # and the original rebuilt it on EVERY call. Cache one pipeline on the
    # function object and reuse it for all subsequent calls.
    nlp = getattr(stanza_segment, "_nlp", None)
    if nlp is None:
        nlp = stanza.Pipeline('zh', processors='tokenize')  # tokenizer only
        stanza_segment._nlp = nlp

    doc = nlp(text)

    # Flatten token surface forms across all sentences.
    segments = []
    for sentence in doc.sentences:
        for token in sentence.tokens:
            segments.append(token.text)

    return " ".join(segments)

def perturb(text):
    """Produce one perturbed copy of a segmented text.

    Args:
        text: List[str], the segments of one document.

    Returns:
        List[str]: the segments after mask-and-refill perturbation. A segment
        may come back as '' when the fill model produced too few fills for it.
    """
    # Pipeline: mask random spans in every segment, ask the fill model for
    # replacement text, pull the fills out of the raw generations, then
    # splice them back into the masked segments.
    masked_segments = [tokenize_and_mask(seg) for seg in text]
    raw_generations = replace_masks(masked_segments)
    fills = extract_fills(raw_generations)
    return apply_extracted_fills(masked_segments, fills)

def count_masks(texts):
    """Count the "<extra_id_N>" sentinel tokens in each text.

    Args:
        texts: List[str] of (masked) segment texts.

    Returns:
        List[int]: per text, the number of whitespace-delimited tokens that
        start with "<extra_id_".
    """
    # NOTE: the original had an unreachable chinese/english branch after this
    # return; it was dead code and has been removed. Prefix counting works
    # for both datasets because masked texts are space-joined by
    # tokenize_and_mask().
    return [len([x for x in text.split() if x.startswith("<extra_id_")]) for text in texts]
    
def tokenize_and_mask(text, pct_words_masked=None, span_length=None):
    """Replace random word spans with T5-style sentinel tokens.

    Example:
        "Today was a brilliant day and I did not go out to enjoy the sun."
        -> "Today was <extra_id_0> and I did not go out to <extra_id_1> the sun."

    Args:
        text: str, a single space-tokenized segment.
        pct_words_masked: fraction of words to mask; defaults to
            ``args.pct_words_masked`` (backward compatible).
        span_length: words per masked span; defaults to ``args.span_length``.

    Returns:
        str: the segment with spans replaced by <extra_id_N> sentinels.
    """
    pct = args.pct_words_masked if pct_words_masked is None else pct_words_masked
    span = args.span_length if span_length is None else span_length

    tokens = text.split(' ')

    # Target span count, floored: each span consumes span + 2*BUFFER_SIZE
    # words (the buffer keeps masked spans from touching each other).
    n_spans = int(pct * len(tokens) / (span + BUFFER_SIZE * 2))

    # Randomly place spans until n_spans succeed. Cap the attempts so a
    # degenerate input (no remaining free positions) cannot loop forever —
    # the original `while` had no exit in that case.
    n_masks = 0
    attempts = 0
    max_attempts = 100 * max(n_spans, 1)
    while n_masks < n_spans and attempts < max_attempts:
        attempts += 1
        # Random candidate span [start, end).
        start = np.random.randint(0, len(tokens) - span)
        end = start + span
        # Buffer window that must be free of existing mask placeholders.
        search_start = max(0, start - BUFFER_SIZE)
        search_end = min(len(tokens), end + BUFFER_SIZE)
        if MASK_STRING not in tokens[search_start:search_end]:
            tokens[start:end] = [MASK_STRING]
            n_masks += 1

    # Renumber the placeholders into unique <extra_id_N> sentinels and
    # rebuild the text string.
    num_filled = 0
    for idx, token in enumerate(tokens):
        if token == MASK_STRING:
            tokens[idx] = f'<extra_id_{num_filled}>'
            num_filled += 1
    assert num_filled == n_masks, f"num_filled {num_filled} != n_masks {n_masks}"

    return ' '.join(tokens)

def replace_masks(masked_text):
    """Generate fill text for every <extra_id_N> sentinel in the inputs.

    Args:
        masked_text: List[str] of masked segments.

    Returns:
        List[str]: raw decoder outputs, each shaped like
        "<pad><extra_id_0> a sunny day <extra_id_1> ... <extra_id_N>".
    """
    # Stop generating once the model emits the sentinel that follows the
    # last mask of the longest-masked segment.
    mask_counts = count_masks(masked_text)
    stop_id = mask_tokenizer.encode(f"<extra_id_{max(mask_counts)}>")[0]

    # Batch-encode the masked segments and sample one completion per input.
    encoded = mask_tokenizer(masked_text, return_tensors="pt", padding=True).to(device)
    generated = mask_model.generate(
        **encoded,
        max_length=150,
        do_sample=True,
        top_p=1.0,
        num_return_sequences=1,
        eos_token_id=stop_id)

    # Keep special tokens: the sentinels are needed later to split the fills.
    return mask_tokenizer.batch_decode(generated, skip_special_tokens=False)

def extract_fills(raw_fills):
    """Extract the actual fill strings from raw decoder output.

    Args:
        raw_fills: List[str], each like
            "<pad><extra_id_0> was a beautiful<extra_id_1> ... <extra_id_N>"

    Returns:
        List[List[str]]: per input, the stripped fill text found between
        consecutive sentinel tokens, e.g.
        [['was a beautiful', 'lived in', ..., 'that it']]
    """
    sentinel = re.compile(r"<extra_id_\d+>")
    extracted = []
    for raw in raw_fills:
        # Drop the tokenizer's pad/eos wrappers before splitting.
        cleaned = raw.replace("<pad>", "").replace("</s>", "").strip()
        # Splitting on the sentinel pattern leaves the fills at positions
        # 1..N-1: index 0 is whatever precedes the first sentinel and the
        # last piece trails the final sentinel, so both are discarded.
        pieces = sentinel.split(cleaned)[1:-1]
        extracted.append([piece.strip() for piece in pieces])
    return extracted

def apply_extracted_fills(masked_text, extracted_fills):
    """Substitute the extracted fills back into their sentinel slots.

    Args:
        masked_text: List[str], masked segments (output of tokenize_and_mask()).
        extracted_fills: List[List[str]], fills (output of extract_fills()).

    Returns:
        List[str]: filled segments; a segment whose fill list is too short
        is replaced by the empty string.
    """
    token_lists = [seg.split(' ') for seg in masked_text]

    # Expected mask count per segment (count_masks inlined): the number of
    # whitespace-delimited tokens starting with the sentinel prefix.
    expected = [len([t for t in seg.split() if t.startswith("<extra_id_")])
                for seg in masked_text]

    # Replace each sentinel token with its corresponding fill.
    for idx, (words, fills, n_masks) in enumerate(zip(token_lists, extracted_fills, expected)):
        if len(fills) < n_masks:
            # Not enough fills generated: drop the segment entirely.
            token_lists[idx] = []
        else:
            for mask_idx in range(n_masks):
                words[words.index(f"<extra_id_{mask_idx}>")] = fills[mask_idx]

    return [" ".join(words) for words in token_lists]

def save_pkl(save_path, seg_text, perturb_sample, idx):
    """Pickle one original segmented text and its perturbed samples.

    Args:
        save_path: directory to write into (created if missing).
        seg_text: List[str], the original text segments.
        perturb_sample: List[List[str]], the perturbed copies of the segments.
        idx: int, sample index used in the output file names.
    """
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() + makedirs() pair.
    os.makedirs(save_path, exist_ok=True)

    # The original is wrapped in a list for shape symmetry with perturb_sample.
    with open(os.path.join(save_path, f"{idx}_og.pkl"), 'wb') as f:
        pickle.dump([seg_text], f)
    with open(os.path.join(save_path, f"{idx}_sample.pkl"), 'wb') as f:
        pickle.dump(perturb_sample, f)

if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default="xsum")
    parser.add_argument('--source_model_name', type=str, default="human")  
    parser.add_argument('--pct_words_masked', type=float, default=0.3)  # fraction of words to mask, default 0.3
    parser.add_argument('--span_length', type=int, default=2)           # masked span length in words, default 2
    parser.add_argument('--mask_filling_model_name', type=str, default="mt5-large")
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--segment_length', type=int, default=40)           # segment length in words, default 40
    parser.add_argument('--num_perturbations', type=int, default=25)        # perturbed samples per text (original comment said 50; actual default is 25)
    parser.add_argument('--checkpoint', type=int, default=0)                # start index for resuming, default 0
    parser.add_argument('--n_samples', type=int, default=100)               # number of samples (original comment said 200; actual default is 100)
    args = parser.parse_args()

    # Load the mask-filling model used to generate the perturbation tokens.
    print(f'Loading MARK model {args.mask_filling_model_name}...')
    print('Moving model to DEVICE...')
    mask_model = transformers.AutoModelForSeq2SeqLM.from_pretrained(os.path.join(model_dir, args.mask_filling_model_name)).to(device)
    try:
        n_positions = mask_model.config.n_positions
    except AttributeError:
        # Some configs (e.g. mT5) expose no n_positions; fall back to 512.
        n_positions = 512
    mask_tokenizer = transformers.AutoTokenizer.from_pretrained(os.path.join(model_dir, args.mask_filling_model_name), model_max_length=n_positions)
    
    # Generate perturbations and save them.
    perturb_save()

    print(f"Data saved")