import csv
import time
import numpy as np
import datasets
import transformers
import torch
import torch.nn.functional as F
import random
import argparse
import os
import math
import dashscope
from http import HTTPStatus

# SECURITY(review): hardcoded API key committed to source — rotate this key
# and load it from an environment variable instead.
dashscope.api_key = "sk-0f689d5759204e5d9ca1ec537e02d7d8"

# Paths used throughout the script
model_dir = r'E:\TJU\Grade3\NLP\Project\cheatGPT-ryan\models'
data_dir = r'E:\TJU\Grade3\NLP\Project\checkGPT\checkGPT\datasets'
save_dir = r'..\results\generate'

# Model loading
def load_source_model(model_name):
    """
    Load a local causal-LM checkpoint and its tokenizer.

    @ param model_name: str, name of the generator model (a subdirectory
        of the module-level `model_dir`)

    @ return model: transformers.PreTrainedModel, the loaded model
    @ return tokenizer: transformers.PreTrainedTokenizer, its tokenizer
    """

    print(f'Loading SOURCE model {model_name}...')
    model_path = os.path.join(model_dir, model_name)

    # Large (2.7B-parameter) checkpoints are loaded in fp16 to halve memory.
    model_kwargs = {}
    if '2.7B' in model_name:
        model_kwargs.update(dict(torch_dtype=torch.float16))

    model = transformers.AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)

    # Tokenizer loading.
    optional_tok_kwargs = {}
    if "opt-" in model_name:
        print("Using non-fast tokenizer for OPT")
        # BUG FIX: the from_pretrained keyword is `use_fast`, not `fast`.
        # The original `fast=False` was silently ignored, so a fast
        # tokenizer was returned despite the message above.
        optional_tok_kwargs['use_fast'] = False

    tokenizer = transformers.AutoTokenizer.from_pretrained(model_path, **optional_tok_kwargs)
    # Causal LMs such as GPT-2/OPT ship without a pad token; reuse EOS so
    # batched padding works.
    tokenizer.pad_token_id = tokenizer.eos_token_id

    return model, tokenizer

# sample from the source model using ****only**** the first `prompt_tokens`
# tokens of each example as context
def sample_from_model(texts, base_tokenizer, min_words=250, prompt_tokens=35):
    """
    Generate continuations for a batch of texts.

    Relies on the module-level globals `model`, `device` and `args`
    (set in __main__); generation only actually runs for the "xsum"
    dataset — otherwise `decoded` stays a list of empty strings.

    @ param texts: List[str], input texts
    @ param base_tokenizer: transformers.PreTrainedTokenizer, used to
        encode the prompts and decode the generations
    @ param min_words: int, minimum word count required of each generation
    @ param prompt_tokens: int, number of leading tokens kept as the prompt

    @ return original: List[str], the human texts with the prompt prefix removed
    @ return decoded: List[str], the generated texts with the prompt prefix removed
    @ return prompts: List[str], the decoded prompt prefixes
    """
    # Keep a copy of the human texts.
    original = list(texts) 

    # Encode the batch and keep only the first `prompt_tokens` positions
    # (applied to every tensor in the encoding, e.g. attention_mask too).
    all_encoded = base_tokenizer(texts, return_tensors="pt", padding=True).to(device)
    all_encoded = {key: value[:, :prompt_tokens] for key, value in all_encoded.items()}

    # Decode the prompt prefix of every text.
    prompts = base_tokenizer.batch_decode(all_encoded['input_ids'], skip_special_tokens=True)
    decoded = ['' for _ in range(len(texts))]

    if args.dataset == "xsum":
        # Regenerate until every sample has at least min_words words.
        tries = 0
        while (m := min(len(x.split()) for x in decoded)) < min_words:
            if tries != 0:
                print()
                print(f"min words: {m}, needed {min_words}, regenerating (try {tries})")
            # Only regenerate the samples that are still too short.
            mask = [len(d.split()) < min_words for d in decoded]
            masked_encoded = {k: v[mask] for k, v in all_encoded.items()}

            min_length = 300
            outputs = model.generate(**masked_encoded, 
                                     min_length=min_length, 
                                     max_length=400, 
                                     do_sample=True, 
                                     pad_token_id=base_tokenizer.eos_token_id, 
                                     eos_token_id=base_tokenizer.eos_token_id)
            new_decoded = base_tokenizer.batch_decode(outputs, skip_special_tokens=True)
            # Scatter the fresh generations back into their original slots.
            j = 0
            for i in range(len(decoded)):
                if mask[i]:
                    decoded[i] = new_decoded[j]
                    j += 1
            tries += 1

    # Strip the prompt prefix from both the generated and the human texts.
    # NOTE(review): this assumes the decoded prompt is an exact character
    # prefix of each text; tokenizer round-trips are not guaranteed to
    # preserve that — confirm for the tokenizers actually used.
    decoded = [d[len(p):] for d, p in zip(decoded, prompts)]
    original = [t[len(p):] for t, p in zip(original, prompts)]
    
    return original, decoded, prompts  

def trim_to_shorter_length(texta, textb):
    """
    Truncate both texts to the length of the shorter one.

    Length is measured in characters for the "chinese" dataset and in
    space-separated tokens otherwise (mode chosen via the global `args`).

    @ param texta: str, first text
    @ param textb: str, second text

    @ return texta: str, truncated first text
    @ return textb: str, truncated second text
    """

    if args.dataset == "chinese":
        limit = min(len(texta), len(textb))
        return texta[:limit], textb[:limit]

    words_a = texta.split(' ')
    words_b = textb.split(' ')
    limit = min(len(words_a), len(words_b))
    return ' '.join(words_a[:limit]), ' '.join(words_b[:limit])

def generate_samples(raw_data, base_tokenizer, batch_size):
    """
    Build the sample dataset by pairing human texts with LLM continuations.

    @ param raw_data: List[str], original human texts
    @ param base_tokenizer: transformers.PreTrainedTokenizer, tokenizer
        passed through to sample_from_model
    @ param batch_size: int, number of texts per generation batch

    @ return data: dict with "human", "LLMs" and "prompt" lists
    """
    # Fixed seeds so generation is reproducible.
    torch.manual_seed(42)
    np.random.seed(42)

    # Accumulators for human texts, LLM generations and their prompts.
    data = {"human": [], "LLMs": [], "prompt": []}

    for batch_idx in range(math.ceil(len(raw_data) / batch_size)):
        print('Generating samples for batch', batch_idx, 'of', math.ceil(len(raw_data) / batch_size))
        # Slice out the current batch of human texts.
        chunk = raw_data[batch_idx * batch_size:(batch_idx + 1) * batch_size]

        # Generate continuations for this batch.
        humans, sampled, prompts = sample_from_model(chunk, base_tokenizer, min_words=200)

        for human_text, llm_text, prompt in zip(humans, sampled, prompts):
            # Equalize lengths so human and LLM texts are comparable.
            human_text, llm_text = trim_to_shorter_length(human_text, llm_text)

            data["human"].append(human_text)
            data["LLMs"].append(llm_text)
            data["prompt"].append(prompt)

    return data

def generate_samples_chinese(data, batch_size):
    """
    Generate the Chinese sample dataset via the DashScope API.

    The first args.n_samples texts form the working set; the remainder is
    kept as a spare pool, used as replacements when the API rejects a text
    as inappropriate.

    @ param data: List[str], preprocessed human texts
    @ param batch_size: int, batch size (only affects progress printing)

    @ return data: dict with "human", "LLMs" and "prompt" lists
    """
    # Split into working set and spare pool.
    raw_data = data[:args.n_samples]
    unuse_data = data[args.n_samples:]

    # Accumulators for human texts, LLM generations and their prompts.
    data = {
        "human": [],
        "LLMs": [],
        "prompt": []
    }    

    # Iterate over the batches.
    for batch in range(math.ceil(len(raw_data) / batch_size)):
        print('Generating samples for batch', batch, 'of', math.ceil(len(raw_data) / batch_size))
        # Texts of the current batch.
        original_text = raw_data[batch * batch_size:(batch + 1) * batch_size]

        for text in original_text:
            # The prompt asks for >=300 Chinese characters seeded by the
            # first 50 characters of the human text.
            prompt = "根据下述文本生成至少300字的中文内容:" + text[:50]
            messages = [
                {'role': 'system', 'content': 'You are a helpful assistant.'},
                {'role': 'user', 'content': prompt}
            ]
            print(prompt)
            # Retry budget and initial back-off for failed requests.
            retries = 5
            delay = 10  # back off 10 seconds initially
            # Retry loop: re-issue the request on transient failures.
            for attempt in range(retries):
                # Call the model through the DashScope API.
                response = dashscope.Generation.call(
                    model='qwen2.5-3b-instruct',
                    messages=messages,
                )

                if response.status_code == HTTPStatus.OK:
                    data['LLMs'].append(response['output']['text'])
                    data['human'].append(text)
                    data['prompt'].append(prompt)
                    break
                
                # 429 means rate limiting: sleep and retry.
                elif response.status_code == 429:
                    print(f"请求过多，正在重试...（尝试次数：{attempt + 1})")
                    time.sleep(delay)
                    # Exponential back-off on each retry.
                    delay *= 2
                else:
                    print('Request failed, Status code:', response.status_code)
                    print(f"错误信息：{response.message}")
                    if response.message == "Input data may contain inappropriate content.":
                        # Swap in a replacement text from the spare pool and
                        # retry with the remaining attempts.
                        # NOTE(review): raises IndexError if the spare pool
                        # is exhausted, and the retry counter is not reset
                        # for the replacement text.
                        text = unuse_data.pop()
                        prompt = "根据下述文本生成至少300字的中文内容:" + text[:50]

                        print(f"替换的文本提示: {prompt}")
                        messages = [
                            {'role': 'system', 'content': 'You are a helpful assistant.'},
                            {'role': 'user', 'content': prompt}
                        ]
                    else:
                        # Unrecoverable error: give up on this text.
                        break
    
    return data

# Load the raw texts from a CSV file
def load_data_from_csv(file_dir):
    """
    Load the first column of `chinese.csv` found under *file_dir*.

    Skips the header row and any empty rows.

    @ param file_dir: str, directory containing `chinese.csv`

    @ return data: List[str], first-column values in file order
    """
    file_path = os.path.join(file_dir, 'chinese.csv')
    data = []
    # newline='' is required by the csv module so that embedded newlines
    # inside quoted fields are parsed correctly.
    with open(file_path, 'r', encoding='utf-8', newline='') as csvfile:
        reader = csv.reader(csvfile)
        next(reader, None)  # skip the header row; tolerate an empty file
        for row in reader:
            if row:  # skip blank rows
                data.append(row[0])
    return data

# Collapse newlines and other whitespace runs
def strip_newlines(text):
    """Normalize whitespace: collapse every whitespace run (including
    newlines) into a single space and strip the ends."""
    tokens = text.split()
    return ' '.join(tokens)

def generate_data(dataset, key, base_tokenizer, preproc_tokenizer):
    """
    Preprocess the source dataset, then generate paired human/LLM samples.

    Preprocessing:
    1. de-duplicate (order preserving)
    2. strip surrounding whitespace
    3. collapse newlines / whitespace runs
    4. keep only examples longer than a minimum length
    5. keep only examples that tokenize to at most 512 tokens

    @ param dataset: str, dataset name ("xsum" or "chinese"; a "squad"
        length rule exists below but no loader for it is defined here)
    @ param key: str, split/field key used when loading the "xsum" dataset
    @ param base_tokenizer: transformers.PreTrainedTokenizer, passed through
        to sample generation
    @ param preproc_tokenizer: transformers.PreTrainedTokenizer, used only
        to filter over-long examples

    @ return data: dict with "human", "LLMs" and "prompt" lists
    """
    # Load the raw dataset. (NOTE: the docstring previously advertised an
    # `n_samples` parameter that never existed; the count comes from the
    # global `args.n_samples`.)
    if dataset == "xsum":
        data = datasets.load_from_disk(os.path.join(data_dir, dataset))[key]
    elif dataset == "chinese":
        data = load_data_from_csv(os.path.join(data_dir, dataset))

    # De-duplicate while preserving order.
    data = list(dict.fromkeys(data))

    # Strip surrounding whitespace, then collapse newlines/whitespace runs.
    data = [strip_newlines(x.strip()) for x in data]

    # Keep only sufficiently long examples. Length is measured in words for
    # the English datasets and in characters for Chinese.
    # (Consistency fix: use the `dataset` parameter throughout instead of
    # mixing it with the identical global `args.dataset`.)
    min_length = 200 if dataset == "squad" else 250
    long_data = []
    if dataset in ["xsum", "squad"]:
        long_data = [x for x in data if len(x.split()) > min_length]
    elif dataset == "chinese":
        long_data = [x for x in data if len(x) > min_length]

    # Fall back to the unfiltered data if the filter left nothing.
    if len(long_data) > 0:
        data = long_data

    # Deterministic shuffle, then cap the candidate pool at 5000 examples.
    random.seed(0)
    random.shuffle(data)
    data = data[:5_000]

    # Keep only examples of at most 512 tokens.
    max_length = 512
    if dataset in ["xsum", "squad"]:
        tokenized_data = preproc_tokenizer(data)
        data = [x for x, y in zip(data, tokenized_data["input_ids"]) if len(y) <= max_length]
    elif dataset == "chinese":
        # NOTE(review): with truncation=True every encoding already fits in
        # max_length, so this filter keeps everything — confirm intent.
        tokenized_data = preproc_tokenizer(data, truncation=True, padding=True, max_length=max_length)
        data = [x for x, y in zip(data, tokenized_data["input_ids"]) if len(y) <= max_length]

    # Report pool size and average word count.
    print(f"Total number of samples: {len(data)}")
    print(f"Average number of words: {np.mean([len(x.split()) for x in data])}")

    # Generate samples from the first n_samples examples.
    if dataset == 'chinese':
        return generate_samples_chinese(data, args.batch_size)
    else:
        return generate_samples(data[:args.n_samples], base_tokenizer, args.batch_size)

# Save the generated data to disk
def save_data(data):
    """
    Save generated texts to disk, one file per sample.

    Layout under `save_dir`:
      {dataset}/human/{i}.txt          - human texts (skipped if present)
      {dataset}/prompt/{i}.txt         - prompts (skipped if present)
      {dataset}/LLMs/{model}/{i}.txt   - LLM texts (always written)

    @ param data: dict with "human", "LLMs" and "prompt" lists
    """
    human_save_path = os.path.join(save_dir, f"{args.dataset}", "human")
    LLMs_save_path = os.path.join(save_dir, f"{args.dataset}", "LLMs", f"{args.source_model_name}")
    prompt_save_path = os.path.join(save_dir, f"{args.dataset}", "prompt")

    def _write_all(path, texts):
        # Write each text to {path}/{idx}.txt, creating the directory if needed.
        os.makedirs(path, exist_ok=True)
        for idx, text in enumerate(texts):
            with open(os.path.join(path, f"{idx}.txt"), 'w', encoding='utf-8') as f:
                f.write(text)

    # Human texts and prompts are shared across source models, so skip them
    # if a previous run already wrote them. (BUG FIX: the previous if/else
    # branches were identical and always overwrote, defeating the
    # documented skip-if-exists intent.)
    if os.path.exists(os.path.join(human_save_path, "0.txt")):
        print("Human text already exists, skipping")
    else:
        _write_all(human_save_path, data["human"])

    if os.path.exists(os.path.join(prompt_save_path, "0.txt")):
        print("Prompt text already exists, skipping")
    else:
        _write_all(prompt_save_path, data["prompt"])

    # LLM outputs are model-specific and always written.
    _write_all(LLMs_save_path, data["LLMs"])

    print(f"Data saved")
    
if __name__ == '__main__':
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default="xsum")              # dataset name
    parser.add_argument('--dataset_key', type=str, default="document")      # field key within the dataset
    parser.add_argument('--n_samples', type=int, default=200)               # number of samples to use
    parser.add_argument('--source_model_name', type=str,required=True, help='Please specify the source model name.')                    # name of the generator model
    parser.add_argument('--batch_size', type=int, default=10)                # batch size
    args = parser.parse_args()

    # Tokenizer used only during preprocessing to filter over-long examples.
    preproc_tokenizer = transformers.AutoTokenizer.from_pretrained(os.path.join(model_dir, "mt5-large"), model_max_length=512)

    if args.dataset != "chinese":
        # Load the local source model and its tokenizer, then move the
        # model to the selected device.
        model, tokenizer = load_source_model(args.source_model_name)
        print('Moving model to DEVICE...')
        model.to(device)
    else:
        # Chinese generation goes through the DashScope API; no local model.
        model, tokenizer = None, None

    print(f'Loading dataset {args.dataset}...')

    # Generate paired human/LLM data for the chosen dataset.
    data = generate_data(args.dataset, args.dataset_key, tokenizer, preproc_tokenizer)
    

    # Save the results to disk.
    save_data(data)
