import json
import sys
import subprocess
import re
import pdb
# Validate command-line arguments: exactly four positional arguments are required.
if len(sys.argv) != 5:
    print("Usage: python script.py <input_jsonl> <reference_file> <hypothesis_file> <wer_output>")
    sys.exit(1)

# Positional arguments: the input JSONL of scoring records, the two
# intermediate text files this script writes, and the WER report path.
input_jsonl = sys.argv[1]
reference_file = sys.argv[2]
hypothesis_file = sys.argv[3]
output_wer = sys.argv[4]
# External WER scorer invoked at the end of the script (path relative to CWD).
wer_script = 'tools/compute-wer.py'
# Punctuation characters that should not be preceded by a space.
PUNCS = '!,.?;:'

# Compound words that the model tends to emit as two tokens, mapped to the
# single-word spelling expected by the references. Applied in insertion
# order via plain substring replacement in post_hyp.
word_map = {
    "some one": "someone",
    "every one": "everyone",
    "any one": "anyone",
    "no one": "no one",  # NOTE(review): maps to itself (a no-op) — presumably deliberate to keep the two-word form; confirm
    "every thing": "everything",
    "some thing": "something",
    "any thing": "anything",
    "some time": "sometime",
    "every time": "everytime",
    "some where": "somewhere",
    "any where": "anywhere",
    "no where": "nowhere",
    "what ever": "whatever",
    "how ever": "however",
    "where ever": "wherever",
    "who ever": "whoever",
    "for ever": "forever",
    "stream line": "streamline",
    "out side": "outside",
    "in side": "inside",
    "on line": "online",
    "under way": "underway",
    "any body": "anybody",
    "some body": "somebody",
    "no body": "nobody",
    "to get her": "together",
    "to day": "today",
    "to night": "tonight",
    "to morrow": "tomorrow",
    "by pass": "bypass",
    "out fit": "outfit",
    "service ability": "serviceability"
}

def process_english_parts(text):
    """Collapse runs of single spaced-out Latin letters into one word.

    Finds every maximal run of Latin letters and spaces in *text*. A run
    qualifies for collapsing when, after trimming, it contains exactly one
    space between every pair of letters (space count == letter count - 1),
    i.e. each "word" in the run is a single character, e.g. "A B C".
    Qualifying runs are rewritten as the joined letters plus one trailing
    space ("ABC "); only the first occurrence of each run is replaced.
    """
    for segment in re.findall(r'[A-Za-z ]+', text):
        core = segment.strip()
        letters = sum(ch.isalpha() for ch in core)
        if core.count(' ') == letters - 1:
            # Join the letters and keep a single trailing space as separator.
            text = text.replace(segment, core.replace(' ', '') + ' ', 1)
    return text

def remove_intro_phrases(text):
    """Strip a known boilerplate lead-in phrase from the start of *text*.

    If the text begins with one of the Chinese intro phrases the model
    emits before the transcript, the phrase is removed and the remainder
    is stripped of surrounding whitespace; otherwise the text is returned
    unchanged.
    """
    lead_ins = (
        "这段音频的原始内容是：",
        "这段音频说的是：",
    )
    for lead in lead_ins:
        if text.startswith(lead):
            return text[len(lead):].strip()
    return text

def post_hyp(text):
    """Normalize a model hypothesis line for WER scoring.

    Drops surrounding single quotes and known intro phrases, strips
    <|...|> special tokens, collapses whitespace, removes the space before
    punctuation, deletes ASCII and full-width sentence punctuation
    (hyphens become spaces), trims leading spaces, and finally merges
    compound words the model tends to split (per word_map).
    """
    text = remove_intro_phrases(text.strip("'"))
    # Replace special tokens such as <|en|> with a space.
    cleaned = re.sub(r"<\|.*?\|>", " ", text)
    # Collapse whitespace runs, then drop any space before punctuation.
    cleaned = re.sub(r"\s+", " ", cleaned)
    cleaned = re.sub(f" ?([{PUNCS}])", r"\1", cleaned)
    # Delete sentence punctuation in one pass; "-" acts as a word
    # separator, so it turns into a space instead of being deleted.
    cleaned = cleaned.translate(str.maketrans("-", " ", ".,?!。，？！"))
    cleaned = cleaned.lstrip(" ")
    # Merge split compounds ("some one" -> "someone", ...) in dict order.
    for split_form, merged in word_map.items():
        cleaned = cleaned.replace(split_form, merged)
    return cleaned
def post_ref(text):
    """Normalize a reference transcript line for WER scoring.

    Empty or falsy input is returned unchanged. Otherwise <|...|> special
    tokens are replaced with spaces, whitespace runs are collapsed, the
    space before punctuation is removed, periods are deleted, and leading
    spaces are trimmed.
    """
    if not text:
        return text
    normalized = re.sub(r"<\|.*?\|>", " ", text)
    normalized = re.sub(r"\s+", " ", normalized)
    # Drop any single space preceding one of !,.?;: then delete periods.
    normalized = re.sub(r" ?([!,.?;:])", r"\1", normalized)
    normalized = normalized.replace(".", "")
    return normalized.lstrip(" ")

# Read the JSONL results file and write parallel reference / hypothesis
# text files (one "<id> <normalized text>" line each) for the WER scorer.
with open(input_jsonl, 'r', encoding='utf-8') as infile, \
     open(reference_file, 'w', encoding='utf-8') as ref_out, \
     open(hypothesis_file, 'w', encoding='utf-8') as hyp_out:
    
    for line in infile:
        # Each record is expected to carry 'id', 'text' (the reference)
        # and 'hypothesis' (the model output); a KeyError here means the
        # input JSONL does not match that schema.
        data = json.loads(line)
        ref_out.write(data['id'] + " " + post_ref(data['text']) + '\n')
        hyp_out.write(data['id'] + " " + post_hyp(data['hypothesis']) + '\n')

# Invoke the external compute-wer.py scorer and redirect its report into
# output_wer. The argument-list form (shell=False) is used so that file
# paths containing spaces or shell metacharacters — all four come straight
# from the command line — cannot break the command or be interpreted by a
# shell. sys.executable runs the scorer with the same interpreter as this
# script (works even when no bare `python` is on PATH), and check=True
# surfaces scorer failures instead of silently leaving an empty report.
with open(output_wer, 'w', encoding='utf-8') as wer_out:
    subprocess.run(
        [sys.executable, wer_script, '--char=1', '--v=1',
         reference_file, hypothesis_file],
        stdout=wer_out,
        check=True,
    )
