import json
import re
import jieba
import torch
from model import get_padding_mask,get_subsequent_mask
from config import *
import sacrebleu
# Reference sentences

# Chinese word segmentation
def divided_zh(sentence):
    """Segment a Chinese sentence into a list of tokens with jieba."""
    return list(jieba.cut(sentence))
# English tokenization
def divided_en(sentence):
    """Split an English sentence into word and punctuation tokens.

    Words (runs of \\w) and individual punctuation characters each become
    one token; whitespace is discarded.
    """
    token_pattern = re.compile(r'\w+|[^\w\s]')
    return token_pattern.findall(sentence)

def get_vocab(lang='en'):
    """Load a vocabulary file and return its two lookup structures.

    Args:
        lang: 'en' for the English vocab file, 'zh' for the Chinese one.

    Returns:
        (id2vocab, vocab2id): a list mapping token id -> token (one token
        per line of the file) and the inverse dict mapping token -> id.

    Raises:
        ValueError: if lang is neither 'en' nor 'zh'.  (Previously an
        unknown lang left file_path unbound and crashed with
        UnboundLocalError at the open() call.)
    """
    if lang == 'en':
        file_path = EN_VOCAB_PATH
    elif lang == 'zh':
        file_path = ZH_VOCAB_PATH
    else:
        raise ValueError(f"unsupported lang {lang!r}: expected 'en' or 'zh'")
    with open(file_path, encoding='utf-8') as file:
        lines = file.read()
    # NOTE: split('\n') keeps a trailing empty entry if the file ends with a
    # newline; preserved on purpose so token ids match the training vocab.
    id2vocab = lines.split('\n')
    vocab2id = {v: k for k, v in enumerate(id2vocab)}
    return id2vocab, vocab2id

# Generate predictions token by token (greedy decoding)
def batch_greedy_decode(model,src_x,src_mask,max_len=MAX_LEN):
    """Greedy-decode a batch of source sequences into Chinese strings.

    Args:
        model: seq2seq transformer exposing .encoder, .decoder and .generator.
        src_x: source token-id tensor; assumed shape (batch, src_len) -- TODO confirm.
        src_mask: attention mask for src_x, passed through to encoder/decoder.
        max_len: maximum number of decoding steps.

    Returns:
        list[str]: one decoded string per batch element, with the leading
        SOS stripped and the output truncated at the first EOS.
    """
    src_x = src_x.to(device)
    src_mask = src_mask.to(device)
    # Chinese vocabulary: token id -> token string
    zh_id2vocab,_ = get_vocab("zh")
    # Encode the source once; the memory is reused at every decoding step.
    memory = model.encoder(src_x,src_mask)
    # Initialize the decoder input with a single SOS token per batch element.
    prob_x = torch.tensor([[SOS_ID]] * src_x.size(0))
    prob_x = prob_x.to(device)

    for _ in range(max_len):
        # Decoder mask = padding mask OR causal (subsequent-position) mask.
        prob_pad_mask = get_padding_mask(prob_x,PAD_ID).to(device)
        prob_subsequent_mask = get_subsequent_mask(prob_x.size(1)).to(device)
        prob_mask = prob_pad_mask | prob_subsequent_mask
        # Normalize to a boolean mask after the bitwise OR.
        prob_mask = prob_mask != 0

        decoder_output = model.decoder(prob_x,prob_mask,memory,src_mask)
        # Predict the next token from the last time step only.
        output = model.generator(decoder_output[:,-1,:])
        output = torch.softmax(output,dim=-1)
        predict = torch.argmax(output,dim=-1,keepdim=True)
        # Append the newly predicted token to the running decoder input.
        prob_x = torch.concat([prob_x,predict],dim=-1)
        # Stop only when EVERY sequence in the batch emitted EOS this step;
        # sequences that finished earlier keep generating until then (their
        # extra tokens are discarded below at the first EOS).
        if torch.all(predict == EOS_ID).item():
            break
    # Map predicted ids back to token strings.
    batch_prob_text = []
    for prob in prob_x:
        prob_text = []
        for prob_id in prob:
            if prob_id == SOS_ID:
                continue
            if prob_id == EOS_ID:
                break
            prob_text.append(zh_id2vocab[prob_id])
        batch_prob_text.append("".join(prob_text))
    return batch_prob_text

def bleu_score(hyp,refs):
    """Compute corpus-level BLEU with sacrebleu's Chinese tokenizer.

    Args:
        hyp: list of hypothesis strings.
        refs: list of reference lists (sacrebleu corpus format).

    Returns:
        float: BLEU score rounded to two decimal places.
    """
    result = sacrebleu.corpus_bleu(hyp, refs, tokenize='zh')
    return round(result.score, 2)


import os


def split_json(path=None, keep=100):
    """Truncate a JSON-array file in place to its first `keep` records.

    Args:
        path: JSON file containing a top-level list. Defaults to
            TRAIN_SAMPLE_PATH, resolved lazily at call time instead of at
            import time (the eager default bound the config value at def
            time and made the module unimportable without config).
        keep: number of leading records to retain (default 100; the old
            comment claimed 400 but the code always kept 100 — the count
            is now an explicit, overridable parameter).

    Invalid input is reported via print() and the function returns early;
    it never raises for missing/empty/malformed files.
    """
    if path is None:
        path = TRAIN_SAMPLE_PATH

    # Guard clauses: missing or zero-byte file.
    if not os.path.exists(path):
        print(f"错误：文件不存在 - {path}")
        return
    if os.path.getsize(path) == 0:
        print(f"错误：文件为空 - {path}")
        return

    try:
        with open(path, 'r', encoding='utf-8') as file:
            content = file.read().strip()

        # File contained only whitespace.
        if not content:
            print("错误：文件内容为空")
            return

        try:
            lines = json.loads(content)
        except json.JSONDecodeError as e:
            print(f"JSON解析错误: {str(e)}")
            print(f"错误位置: 行 {e.lineno}, 列 {e.colno}")
            # Show the content surrounding the parse error for debugging.
            start = max(0, e.pos - 50)
            end = min(len(content), e.pos + 50)
            print(f"错误上下文: ...{content[start:end]}...")
            return

        # The file must hold a top-level JSON array.
        if not isinstance(lines, list):
            print("错误：JSON内容不是列表格式")
            return

        # Keep only the first `keep` records and rewrite the file in place.
        to_write = lines[:keep]
        with open(path, 'w', encoding='utf-8') as write_file:
            json.dump(to_write, write_file, ensure_ascii=False, indent=2)

        print(f"成功处理：保留了{len(to_write)}条记录")

    except UnicodeDecodeError:
        print("错误：文件编码不是UTF-8，无法正确读取")
    except Exception as e:
        print(f"处理文件时发生意外错误: {str(e)}")


if __name__ == '__main__':
    # Example of mapping a segmented sentence to vocabulary ids:
    #   vocabs = divided_zh("我喜欢读书")
    #   _, vocab2id = get_vocab("zh")
    #   tokens = [vocab2id.get(v, UNK_ID) for v in vocabs]
    split_json(path=TRAIN_SAMPLE_PATH)
