import os
import re

import numpy as np
import torch
import stanza
import transformers
import gradio as gr 

# Matches T5-style sentinel tokens such as "<extra_id_0>" in generated text.
MASK_PATTERN = re.compile(r"<extra_id_\d+>")
# Temporary placeholder inserted while masking; later replaced by numbered sentinels.
MASK_STRING = '<<<mask>>>'

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  
# Local directory holding all pretrained model checkpoints.
model_dir = r'E:\TJU\Grade3\NLP\Project\cheatGPT-ryan\models'

# Load the scoring model
def load_source_model(model_name):
    """Load a scoring (causal LM) checkpoint and its tokenizer from ``model_dir``.

    @param model_name: str, subdirectory name of the checkpoint (e.g. 'gpt2-xl')
    @return: (model, tokenizer) tuple
    """
    print(f'Loading SCORING model {model_name}...')
    model_path = os.path.join(model_dir, model_name)

    # Load the model; 2.7B checkpoints are loaded in fp16 to halve memory use.
    model_kwargs = {}
    if '2.7B' in model_name:
        model_kwargs.update(dict(torch_dtype=torch.float16))

    model = transformers.AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)

    # Load the tokenizer; OPT checkpoints need the slow (non-fast) implementation.
    optional_tok_kwargs = {}
    if "opt-" in model_name:
        print("Using non-fast tokenizer for OPT")
        optional_tok_kwargs['fast'] = False
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_path, **optional_tok_kwargs)
    # Causal LMs usually ship without a pad token; reuse EOS so padding works.
    tokenizer.pad_token_id = tokenizer.eos_token_id

    return model, tokenizer

# Chinese word segmentation
def stanza_segment(text):
    """Segment Chinese text into words using Stanza.

    @param text: str, input text
    @return: str, segmented text with words separated by single spaces
    """
    # Building a stanza.Pipeline is expensive (it loads model weights), so the
    # original code paid that cost on EVERY call. Build it once and cache the
    # instance on the function object for subsequent calls.
    nlp = getattr(stanza_segment, '_nlp', None)
    if nlp is None:
        nlp = stanza.Pipeline('zh', processors='tokenize')  # tokenizer processor only
        stanza_segment._nlp = nlp

    doc = nlp(text)

    # Flatten doc -> sentences -> tokens into a list of surface strings.
    segments = [token.text for sentence in doc.sentences for token in sentence.tokens]

    return " ".join(segments)

# Mask random spans of a whitespace-tokenized text
def tokenize_and_mask(text):
    """Replace random two-word spans with numbered '<extra_id_k>' sentinels.

    Roughly 30% of the words end up covered; a one-word buffer around each
    span keeps sentinels from ever being adjacent to one another.

    @param text: str, space-separated tokens
    @return: str, text with spans replaced by '<extra_id_0>', '<extra_id_1>', ...
    """
    # Fraction of words that masking should cover.
    PCT = 0.3

    words = text.split(' ')

    # Each mask consumes a 2-word span plus a 1-word buffer on either side;
    # truncate to an integer span count.
    target_spans = int(PCT * len(words) / (2 + 1 * 2))

    # Keep drawing random windows until the target number of spans is placed.
    placed = 0
    while placed < target_spans:
        # Random 2-word window.
        start = np.random.randint(0, len(words) - 2)
        end = start + 2
        # Widen by the one-word buffer for the adjacency check.
        lo = max(0, start - 1)
        hi = min(len(words), end + 1)
        # Only accept windows whose neighborhood is still mask-free.
        if MASK_STRING not in words[lo:hi]:
            words[start:end] = [MASK_STRING]
            placed += 1

    # Number the placeholders left-to-right as T5-style sentinels and rejoin.
    filled = 0
    for pos, word in enumerate(words):
        if word == MASK_STRING:
            words[pos] = f'<extra_id_{filled}>'
            filled += 1
    assert filled == placed, f"num_filled {filled} != n_masks {placed}"
    return ' '.join(words)

# Fill masked sentinel spans using the seq2seq mask model
def replace_masks(masked_text):
    """
    @param masked_text: list[str], masked texts containing '<extra_id_k>' sentinels
    @return: list[str], raw decoded generations (special tokens kept)
    """
    # Number of sentinels in each text.
    mask_counts = count_masks(masked_text)

    # Halt generation at the sentinel that FOLLOWS the last fill, i.e.
    # <extra_id_n> for a text whose masks are numbered 0..n-1.
    stop_id = mask_tokenizer.encode(f"<extra_id_{max(mask_counts)}>")[0]

    # Batch-encode and move onto the compute device.
    encoded = mask_tokenizer(masked_text, return_tensors="pt", padding=True).to(device)

    # Sample one completion per input text.
    generated = mask_model.generate(
        **encoded, 
        max_length=150, 
        do_sample=True, 
        top_p=1.0, 
        num_return_sequences=1, 
        eos_token_id=stop_id)

    # Keep special tokens so extract_fills() can split on the sentinels.
    return mask_tokenizer.batch_decode(generated, skip_special_tokens=False)
# Count mask sentinels per text
def count_masks(texts):
    """Return, for each text, how many whitespace-separated tokens start
    with the '<extra_id_' sentinel prefix.

    @param texts: iterable of str
    @return: list[int], one count per input text
    """
    return [sum(1 for tok in text.split() if tok.startswith("<extra_id_")) for text in texts]

# Pull the actual fill strings out of the raw generated text
def extract_fills(raw_fills):
    """
    @param raw_fills: list[str], decoded generations, each shaped like
    "<pad><extra_id_0> was a beautiful<extra_id_1> ... <extra_id_N>"
    @return: list[list[str]], for each text, the fill strings that sit
    between consecutive sentinels, e.g.
    [
        ['was a beautiful', 'lived in', ..., 'that it']
    ]
    """
    extracted = []
    for raw in raw_fills:
        # Strip the decoder's pad/EOS wrappers before splitting.
        cleaned = raw.replace("<pad>", "").replace("</s>", "").strip()
        # Split on the sentinels; drop the (empty) prefix before the first
        # sentinel and the trailing text after the last one, keeping only
        # the fills in between.
        parts = re.split(r"<extra_id_\d+>", cleaned)[1:-1]
        # Trim surrounding whitespace off each fill.
        extracted.append([part.strip() for part in parts])
    return extracted

# Splice the extracted fills back into the sentinel positions of the masked texts
def apply_extracted_fills(masked_text, extracted_fills):
    """
    @param masked_text: list[str], masked texts (output of tokenize_and_mask())
    @param extracted_fills: list[list[str]], fills per text (output of extract_fills())
    @return: list[str], filled texts; a text whose generation produced too few
             fills becomes '' so callers can detect and retry it.
    """
    # Split every masked text into its word list.
    token_lists = [t.split(' ') for t in masked_text]

    needed = count_masks(masked_text)

    # Replace each numbered sentinel with its corresponding fill.
    for pos, (words, fills, n) in enumerate(zip(token_lists, extracted_fills, needed)):
        if len(fills) < n:
            # Generation failure: not enough fills for this text.
            token_lists[pos] = []
        else:
            for k in range(n):
                words[words.index(f"<extra_id_{k}>")] = fills[k]

    # Rejoin each word list into a single string.
    return [" ".join(words) for words in token_lists]

def segment(text):
    """Split a whitespace-tokenized text into roughly 40-word segments.

    The word list is cut into floor(len/40) parts (at least one); each part
    holds ceil(len/parts) words and is rejoined with single spaces.

    @param text: str, space-separated tokens
    @return: list[str], the segments in order
    """
    words = text.split(' ')
    total = len(words)
    n_parts = max(1, total // 40)
    # Ceiling division without floats.
    part_len = -(-total // n_parts)
    return [" ".join(words[i:i + part_len])
            for i in range(0, n_parts * part_len, part_len)]

def perturb(text_segments, n_samples=25, max_attempts=3):
    """Produce perturbed copies of each segment via mask-and-refill.

    Generalized from the original hard-coded constants (25 rounds, 3 retry
    attempts) into backward-compatible keyword parameters.

    @param text_segments: list[str], segments to perturb
    @param n_samples: int, number of independent perturbation rounds
    @param max_attempts: int, cap on refill retries for segments whose
                         generation came back empty
    @return: list[list[str]], n_samples rows, each a perturbed copy of every
             input segment (a segment may remain '' if all retries failed)
    """
    perturb_sample = []
    for _ in range(n_samples):
        masked_text = [tokenize_and_mask(x) for x in text_segments]
        raw_fills = replace_masks(masked_text)
        extracted_fills = extract_fills(raw_fills)
        perturbed_texts = apply_extracted_fills(masked_text, extracted_fills)
        # Retry any segment whose fill generation failed ('' marker).
        attempts = 1
        while '' in perturbed_texts:
            idxs = [idx for idx, x in enumerate(perturbed_texts) if x == '']
            masked_texts = [tokenize_and_mask(x) for idx, x in enumerate(text_segments) if idx in idxs]
            raw_fills = replace_masks(masked_texts)
            extracted_fills = extract_fills(raw_fills)
            new_perturbed_texts = apply_extracted_fills(masked_texts, extracted_fills)
            for idx, x in zip(idxs, new_perturbed_texts):
                perturbed_texts[idx] = x
            attempts += 1
            if attempts >= max_attempts:
                break
        perturb_sample.append(perturbed_texts)
    return perturb_sample
    
def get_ll_of_sample(sample, model, tokenizer):
    """Score every segment of every sample row with the model.

    @param sample: list[list[str]], rows of text segments (e.g. perturb() output)
    @param model: causal LM used for scoring
    @param tokenizer: the model's tokenizer
    @return: np.ndarray of shape (len(sample), len(sample[0])) holding the
             model loss for each segment. NOTE: despite the 'll' name this is
             the loss (mean token negative log-likelihood), not a raw
             log-likelihood.
    """
    scores = np.zeros((len(sample), len(sample[0])))
    with torch.no_grad():
        for row, segments in enumerate(sample):
            for col, seg_text in enumerate(segments):
                encoded = tokenizer(seg_text, return_tensors="pt").to(device)
                # LM loss with the inputs serving as their own labels.
                scores[row, col] = model(**encoded, labels=encoded.input_ids).loss.item()
    return scores

def check(txt, model_name, language):
    """Classify a text as machine-generated or human-written.

    DetectGPT-style test: compare the model loss of the ORIGINAL segments
    against the loss distribution of their perturbed copies, then threshold
    the variance of the resulting per-segment z-scores.

    @param txt: str, input text
    @param model_name: str, scoring-model checkpoint name
    @param language: str, '中文' triggers Stanza word segmentation first
    @return: str, verdict message (Chinese)
    """
    # Word-segment Chinese input so the masking code can split on spaces.
    if language == '中文':
        txt = stanza_segment(txt)
    text_segments = segment(txt)
    perturb_sample = perturb(text_segments)

    # Free GPU memory held during perturbation before loading the scorer.
    torch.cuda.empty_cache()

    # Load the scoring model.
    model, tokenizer = load_source_model(model_name)
    model.to(device)

    # BUG FIX: the original scored perturb_sample twice, so the z-scores were
    # just standardized perturbed losses (variance ~1) and the detector always
    # answered "human". The baseline must be the UNPERTURBED segments.
    og_ll = get_ll_of_sample([text_segments], model, tokenizer)
    perturb_sample_ll = get_ll_of_sample(perturb_sample, model, tokenizer)
    z_scores = (og_ll - perturb_sample_ll.mean(axis=0)) / perturb_sample_ll.std(axis=0)
    z_score_var = np.var(z_scores)

    # Release GPU cache again after scoring.
    torch.cuda.empty_cache()

    print(z_score_var)
    
    # Low variance across segments is taken as evidence of machine generation.
    if z_score_var < 0.15:
        return "该文本是机器生成的文本"
    else:
        return "该文本是人类撰写的文本"
    
def show_txt(txt, chatbot):
    """Append the user's message with a 'detecting...' placeholder reply.

    Mutates and returns the same chat-history list.
    """
    placeholder_reply = "正在检测文本中..."
    chatbot.append((txt, placeholder_reply))
    return chatbot

def check_txt(txt, chatbot, language, model):
    """Run detection on txt and append the (message, verdict) pair.

    Mutates and returns the same chat-history list.
    """
    verdict = check(txt, model, language)
    chatbot.append((txt, verdict))
    return chatbot

def update_model_choices(language):
    """Swap the model-radio options to match the selected language."""
    if language == '英文':
        english_models = ['gpt2-xl', 'gpt-neo-2.7B', 'opt-2.7B']
        return gr.update(choices=english_models, value='gpt2-xl')
    # Only one Chinese-capable scorer is offered.
    return gr.update(choices=['bert-base-chinese'], value='bert-base-chinese')
    
with gr.Blocks() as demo:
    # Language choice drives which scoring models are offered.
    language_radio = gr.Radio(choices=['英文','中文'], label='语言', value = '英文') 
    model_radio = gr.Radio(choices=['gpt2-xl', 'gpt-neo-2.7B', 'opt-2.7B'], label='模型', value='gpt2-xl')

    language_radio.change(fn=update_model_choices, inputs=language_radio, outputs=model_radio) 

    # FIX: the greeting said "发生" (happen); "生成" (generate) is the
    # intended word for "a tool that checks whether text was LLM-generated".
    initial_messages = [(None, "欢迎使用CheckGPT！"), 
                        (None, "CheckGPT是一个用于检查文本是否为大语言模型生成的工具。请选择检测文本语言与使用的模型，并在下方输入框中输入文本，然后按回车键。")]

    chatbot = gr.Chatbot(value=initial_messages, elem_id="chatbot", height=550, label="CheckGPT")

    with gr.Row():         
        # NOTE(review): a float scale works on older gradio releases; newer
        # versions expect an int here — confirm against the pinned version.
        with gr.Column(scale=0.85):          
            txt = gr.Textbox(                
                show_label=False,                
                placeholder="输入文本，按回车确认",      
                container=False          
            )
    
    def clear_textbox():
        # Empty the input box immediately after submit.
        return gr.update(value="")
    
    # On Enter: clear the box, echo a placeholder reply, then run detection.
    txt.submit(clear_textbox, inputs=[], outputs=[txt])
    txt.submit(show_txt, inputs=[txt, chatbot], outputs=chatbot)
    txt.submit(check_txt, inputs=[txt, chatbot, language_radio, model_radio], outputs=chatbot)
    demo.queue()

# Load the mT5 mask-filling model used by replace_masks().
mask_model = transformers.AutoModelForSeq2SeqLM.from_pretrained(os.path.join(model_dir, "mt5-large")).to(device)
# Some configs expose n_positions; fall back to 512 when the attribute is
# absent (equivalent to the original try/except AttributeError).
n_positions = getattr(mask_model.config, 'n_positions', 512)
mask_tokenizer = transformers.AutoTokenizer.from_pretrained(os.path.join(model_dir, "mt5-large"), model_max_length=n_positions)

demo.launch()

               
