import sys
import json
import re
from tqdm import tqdm
def extract_chinese_substrings(text, min_length=2):
    """Collect every contiguous Chinese substring of length >= min_length.

    Spaces are removed first, so Chinese runs separated only by blanks
    merge into a single sequence before substrings are enumerated.

    Returns a set of substrings.
    """
    collected = set()
    compact = text.replace(" ", "")
    for run in re.findall(r'[\u4e00-\u9fff]+', compact):
        run_len = len(run)
        if run_len < min_length:
            continue
        for size in range(min_length, run_len + 1):
            for start in range(run_len - size + 1):
                collected.add(run[start:start + size])
    return collected

def extract_english_words(text):
    """Return the set of whole ASCII-letter words found in text."""
    return {m.group(0) for m in re.finditer(r'\b[A-Za-z]+\b', text)}

def remove_overlapping_substrings(substrings):
    """Drop every string that is contained inside a longer kept string.

    Candidates are visited longest-first, so a shorter substring survives
    only when no already-kept string contains it.
    """
    kept = set()
    for candidate in sorted(substrings, key=len, reverse=True):
        covered = any(candidate in longer for longer in kept)
        if not covered:
            kept.add(candidate)
    return kept

def extract_keywords(lab, context_text):
    """Keywords shared by the reference text and the biasing context.

    Chinese keywords are substrings (length >= 2) present in both texts,
    reduced to the longest non-contained ones; English keywords are whole
    words present in both.  Literal "\\n" sequences in the context are
    converted to real newlines before matching.
    """
    context_text = context_text.replace("\\n", "\n")

    ref_cn = extract_chinese_substrings(lab, min_length=2)
    ctx_cn = extract_chinese_substrings(context_text, min_length=2)
    ref_en = extract_english_words(lab)
    ctx_en = extract_english_words(context_text)

    shared_cn = remove_overlapping_substrings(ref_cn & ctx_cn)
    shared_en = ref_en & ctx_en
    return shared_cn | shared_en

def tokenize_text(text):
    """Split text into a mixed Chinese/English token sequence.

    Chinese: one token per character.  English: one token per whole word.
    Everything else (digits, punctuation, whitespace) is discarded.
    """
    # Alternation order guarantees CJK chars and letter runs are matched
    # before anything else could consume them, so listing only the wanted
    # alternatives is equivalent to matching-then-filtering.
    return re.findall(r'[\u4e00-\u9fff]|[A-Za-z]+', text)

def levenshtein_alignment(ref_tokens, hyp_tokens):
    """Align two token sequences by minimum edit distance.

    Returns (C, S, D, I, alignment) where alignment is a list of
    (ref_token_or_None, hyp_token_or_None) pairs in reference order.
    Ties are broken in favour of substitution/match, then deletion,
    then insertion.
    """
    n_ref, n_hyp = len(ref_tokens), len(hyp_tokens)

    # table[i][j] = (cost, op) for aligning ref[:i] with hyp[:j].
    table = [[(0, '')] * (n_hyp + 1) for _ in range(n_ref + 1)]
    for i in range(1, n_ref + 1):
        table[i][0] = (i, 'del')
    for j in range(1, n_hyp + 1):
        table[0][j] = (j, 'ins')

    for i in range(1, n_ref + 1):
        for j in range(1, n_hyp + 1):
            same = ref_tokens[i - 1] == hyp_tokens[j - 1]
            diag_cost = table[i - 1][j - 1][0] + (0 if same else 1)
            # Ordered so that min() keeps the preferred op on cost ties.
            candidates = [
                (diag_cost, 'match' if same else 'sub'),
                (table[i - 1][j][0] + 1, 'del'),
                (table[i][j - 1][0] + 1, 'ins'),
            ]
            table[i][j] = min(candidates, key=lambda c: c[0])

    # Walk back from the bottom-right corner to recover the alignment.
    pairs = []
    i, j = n_ref, n_hyp
    while i > 0 or j > 0:
        op = table[i][j][1]
        if op in ('match', 'sub'):
            pairs.append((ref_tokens[i - 1], hyp_tokens[j - 1]))
            i, j = i - 1, j - 1
        elif op == 'del':
            pairs.append((ref_tokens[i - 1], None))
            i -= 1
        else:  # 'ins'
            pairs.append((None, hyp_tokens[j - 1]))
            j -= 1
    pairs.reverse()

    n_cor = sum(1 for r, h in pairs if r is not None and h is not None and r == h)
    n_sub = sum(1 for r, h in pairs if r is not None and h is not None and r != h)
    n_del = sum(1 for r, h in pairs if r is not None and h is None)
    n_ins = sum(1 for r, h in pairs if r is None and h is not None)
    return n_cor, n_sub, n_del, n_ins, pairs

def is_english_word(token):
    """True when token consists purely of ASCII letters."""
    return re.match(r'^[A-Za-z]+$', token) is not None

def is_chinese_char(token):
    """True when token is exactly one CJK unified ideograph."""
    return re.match(r'^[\u4e00-\u9fff]$', token) is not None

def mark_bias_tokens(ref_tokens, keywords):
    """Flag the reference tokens that fall inside a keyword occurrence.

    Chinese keywords are located as substrings of the concatenated
    reference text; every token whose character span overlaps a match is
    flagged.  English keywords must equal a token exactly.

    Returns a list of booleans parallel to ref_tokens.
    """
    mask = [False] * len(ref_tokens)

    # Concatenate tokens and record each token's [start, end) char span
    # inside the joined string (English words may span several chars).
    spans = []
    offset = 0
    for tok in ref_tokens:
        spans.append((offset, offset + len(tok)))
        offset += len(tok)
    joined = "".join(ref_tokens)

    cn_keywords = [kw for kw in keywords if re.search('[\u4e00-\u9fff]', kw)]
    en_keywords = [kw for kw in keywords if re.match('^[A-Za-z]+$', kw)]

    # Chinese: every (possibly overlapping) occurrence marks the tokens
    # whose spans intersect the matched character range.
    for kw in cn_keywords:
        search_from = 0
        while True:
            hit = joined.find(kw, search_from)
            if hit < 0:
                break
            hit_end = hit + len(kw)
            for idx, (tok_start, tok_end) in enumerate(spans):
                if tok_start < hit_end and tok_end > hit:
                    mask[idx] = True
            search_from = hit + 1

    # English: exact whole-token equality.
    for kw in en_keywords:
        for idx, tok in enumerate(ref_tokens):
            if tok == kw:
                mask[idx] = True

    return mask


# NOTE(review): a dead, never-finished draft of `process_line` used to live
# here.  Its alignment loop body consisted almost entirely of design-note
# comments (how to recover each aligned pair's reference index), and the
# definition was unconditionally shadowed by the real `process_line` defined
# further below, so it could never execute.  It has been removed; the
# ref-index bookkeeping it discussed is implemented by
# `levenshtein_alignment_with_index`.
def levenshtein_alignment_with_index(ref_tokens, hyp_tokens):
    """Run levenshtein_alignment and attach each pair's reference index.

    Returns (C, S, D, I, indexed) where indexed is a list of
    ((ref_tok, hyp_tok), ref_idx).  ref_idx is None for insertions,
    which consume no reference token.
    """
    C, S, D, I, alignment = levenshtein_alignment(ref_tokens, hyp_tokens)
    indexed = []
    next_ref = 0
    for pair in alignment:
        if pair[0] is None:
            # Insertion: no reference token is consumed.
            indexed.append((pair, None))
        else:
            indexed.append((pair, next_ref))
            next_ref += 1
    return C, S, D, I, indexed


def process_line(data, global_stats):
    """Score one utterance and fold its counts into global_stats.

    Reads 'id', 'text' (reference), 'hypothesis' and 'context_text' from
    the JSON record, computes overall / bias / unbias / Mandarin / English
    token error counts, updates global_stats in place, and returns a
    multi-line report string for this utterance.
    """
    uttid = data.get('id','')
    lab = data.get('text','')
    rec = data.get('hypothesis','')
    context_text = data.get('context_text','')

    keywords = extract_keywords(lab, context_text)

    ref_tokens = tokenize_text(lab)
    hyp_tokens = tokenize_text(rec)

    bias_mask = mark_bias_tokens(ref_tokens, keywords)

    C,S,D,I, align_with_index = levenshtein_alignment_with_index(ref_tokens, hyp_tokens)
    # Each element of align_with_index is ((r, h), ref_idx-or-None).

    N = len(ref_tokens)
    WER_value = (S+D+I)/N*100 if N>0 else 0.0

    # Bias (B_*) vs. unbias (U_*) counters, driven by bias_mask.
    B_N = B_C = B_S = B_D = B_I = 0
    U_N = U_C = U_S = U_D = U_I = 0
    # Per-language counters: Mandarin (m*) and English (e*).
    mN=mC=mS=mD=mI=0
    eN=eC=eS=eD=eI=0

    for (r,h), r_idx in align_with_index:
        if r is not None:
            is_bias = bias_mask[r_idx]
            is_eng = is_english_word(r)
            is_chn = is_chinese_char(r)
            if is_chn:
                mN +=1
            if is_eng:
                eN +=1

            # Classify the reference token as bias or unbias.
            if is_bias:
                B_N +=1
            else:
                U_N +=1

            if h is None:
                # deletion
                if is_bias:
                    B_D +=1
                else:
                    U_D +=1
                if is_chn:
                    mD+=1
                if is_eng:
                    eD+=1
            else:
                if r == h:
                    # correct
                    if is_bias:
                        B_C+=1
                    else:
                        U_C+=1
                    if is_chn:
                        mC+=1
                    if is_eng:
                        eC+=1
                else:
                    # substitution
                    if is_bias:
                        B_S+=1
                    else:
                        U_S+=1
                    if is_chn:
                        mS+=1
                    if is_eng:
                        eS+=1
        else:
            # insertion
            if h is not None:
                # Bias classification of an insertion: keywords describe
                # regions of the *reference*, and an inserted token never
                # appears in the reference, so inserted Chinese characters
                # cannot be confirmed as part of a Chinese keyword and
                # count as unbias.  The one exception is an inserted token
                # that exactly equals a keyword (whole English word) —
                # that counts as a bias insertion (B_I).
                in_keyword = False
                if is_chinese_char(h):
                    mI+=1
                elif is_english_word(h):
                    eI+=1
                else:
                    # Other character classes could be tallied here later
                    # if needed.
                    pass
                # Chinese keywords have length >= 2 while tokens are single
                # characters, so in practice only English keywords match.
                if h in keywords:
                    in_keyword = True

                if in_keyword:
                    B_I+=1
                else:
                    U_I+=1
                # Insertions add nothing to mN/eN: they have no reference
                # token behind them.

    B_WER = ((B_S+B_D+B_I)/B_N*100) if B_N>0 else 0.0
    U_WER = ((U_S+U_D+U_I)/U_N*100) if U_N>0 else 0.0

    # Fold per-utterance counts into the global accumulators.
    global_stats['overall']['N'] += N
    global_stats['overall']['C'] += C
    global_stats['overall']['S'] += S
    global_stats['overall']['D'] += D
    global_stats['overall']['I'] += I

    global_stats['mandarin']['N'] += mN
    global_stats['mandarin']['C'] += mC
    global_stats['mandarin']['S'] += mS
    global_stats['mandarin']['D'] += mD
    global_stats['mandarin']['I'] += mI

    global_stats['english']['N'] += eN
    global_stats['english']['C'] += eC
    global_stats['english']['S'] += eS
    global_stats['english']['D'] += eD
    global_stats['english']['I'] += eI

    global_stats['bias']['N'] += B_N
    global_stats['bias']['C'] += B_C
    global_stats['bias']['S'] += B_S
    global_stats['bias']['D'] += B_D
    global_stats['bias']['I'] += B_I

    global_stats['unbias']['N'] += U_N
    global_stats['unbias']['C'] += U_C
    global_stats['unbias']['S'] += U_S
    global_stats['unbias']['D'] += U_D
    global_stats['unbias']['I'] += U_I

    # Longest keywords first, alphabetical within equal length.
    sorted_keywords = sorted(keywords, key=lambda x: (-len(x), x))
    keywords_str = " ".join(sorted_keywords)

    lab_spaced = " ".join(ref_tokens)
    rec_spaced = " ".join(hyp_tokens)

    result_lines = []
    result_lines.append(f"utt: {uttid}")
    result_lines.append(f"WER: {WER_value:.2f} % N={N} C={C} S={S} D={D} I={I}")
    result_lines.append(f"B-WER: {B_WER:.2f}% N={B_N} C={B_C} S={B_S} D={B_D} I={B_I}")
    result_lines.append(f"U-WER: {U_WER:.2f}% N={U_N} C={U_C} S={U_S} D={U_D} I={U_I}")
    result_lines.append(f"keywords: {keywords_str}")
    result_lines.append(f"lab: {lab_spaced}")
    result_lines.append(f"rec: {rec_spaced}")
    return "\n".join(result_lines)

def print_global_stats(global_stats):
    """Print the aggregated WER summary table to stdout."""
    def _wer(counts):
        # (S + D + I) / N as a percentage; 0.0 when no reference tokens.
        return (counts['S'] + counts['D'] + counts['I']) / counts['N'] * 100 if counts['N'] > 0 else 0.0

    o = global_stats['overall']
    m = global_stats['mandarin']
    e = global_stats['english']
    b = global_stats['bias']
    u = global_stats['unbias']

    bar = "==========================================================================="
    print(bar)
    print(f"Overall -> {_wer(o):.2f} % N={o['N']} C={o['C']} S={o['S']} D={o['D']} I={o['I']}")
    print(f"Mandarin -> {_wer(m):.2f} % N={m['N']} C={m['C']} S={m['S']} D={m['D']} I={m['I']}")
    print(f"English -> {_wer(e):.2f} % N={e['N']} C={e['C']} S={e['S']} D={e['D']} I={e['I']}")
    print(f"B-WER -> {_wer(b):.2f}% N={b['N']} C={b['C']} S={b['S']} D={b['D']} I={b['I']}")
    print(f"U-WER -> {_wer(u):.2f}% N={u['N']} C={u['C']} S={u['S']} D={u['D']} I={u['I']}")
    print(bar)

def main():
    """CLI entry: read a JSONL file, score each record, print a report."""
    if len(sys.argv) != 2:
        print("用法: python script.py input.jsonl", file=sys.stderr)
        sys.exit(1)

    jsonl_path = sys.argv[1]

    stat_keys = ('overall', 'mandarin', 'english', 'bias', 'unbias')
    global_stats = {key: {'N': 0, 'C': 0, 'S': 0, 'D': 0, 'I': 0} for key in stat_keys}

    reports = []
    with open(jsonl_path, 'r', encoding='utf-8') as fh:
        for _line_num, raw in tqdm(enumerate(fh, 1)):
            raw = raw.strip()
            if not raw:
                continue
            try:
                record = json.loads(raw)
            except json.JSONDecodeError:
                # Skip malformed lines silently, matching prior behavior.
                continue
            report = process_line(record, global_stats)
            if report:
                reports.append(report)

    for report in reports:
        print(report)
        print()

    print_global_stats(global_stats)

# Script entry point: only run the CLI when executed directly.
if __name__ == "__main__":
    main()
