import difflib
import jieba
import re
import os
from itertools import zip_longest
from bert_similarity import compute_bert_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity as cosine_sim
import logging
from docx import Document

# -------------------------------
# Configuration parameters (tunable)
# -------------------------------
config = {
    "semantic_threshold": 0.85,          # BERT semantic-similarity threshold (>= this value counts as semantically equal)
    "ngram_n": 3,                        # n used for the n-gram Jaccard metric
    "repetition_min_length": 20,         # minimum run of consecutive Chinese characters treated as continuous repetition
    "tfidf_token_pattern": r"(?u)\b\w+\b",  # token regex handed to TfidfVectorizer
    "match_line_threshold": 0.3,         # hit threshold: a file counts as matched when this share of lines matches
    "match_ratio": 0.8                   # (reserved) longest-match ratio threshold
}

# -------------------------------
# Logging configuration
# -------------------------------
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

#########################################
# Load the text and file names of every docx file in the library folder
#########################################
def load_db_text_and_filenames(folder_path):
    """Concatenate the text of all .docx files under *folder_path*.

    Returns a tuple ``(db_text, db_display)`` where *db_text* is the
    combined, newline-separated paragraph text of every document and
    *db_display* is a ``<br/>``-joined HTML list of the file names.
    Unreadable files are logged and skipped.
    """
    collected_names = []
    combined_text = ""
    for entry in os.listdir(folder_path):
        if not entry.lower().endswith(".docx"):
            continue
        collected_names.append(entry)
        full_path = os.path.join(folder_path, entry)
        try:
            document = Document(full_path)
            # Keep only non-empty paragraphs, one per line.
            paragraphs = [p.text for p in document.paragraphs if p.text.strip() != ""]
            body = "\n".join(paragraphs)
            combined_text += body + "\n"
            logger.info("加载文件 %s，长度：%d", entry, len(body))
        except Exception as e:
            logger.error("读取文件 %s 失败：%s", entry, e)
    if not collected_names:
        logger.warning("未从文件夹 %s 中读取到任何 docx 文件", folder_path)
        collected_names = ["未找到标准文件"]
    display = "<br/>".join(collected_names)
    logger.info("标准文件列表：%s", display)
    logger.info("标准文本总长度：%d", len(combined_text))
    return combined_text, display

#########################################
# Detect matched (hit) files by line-level matching
#########################################
def get_matched_files(folder_path, upload_text, match_line_threshold):
    """Return an HTML ``<br/>``-joined list of library files hit by *upload_text*.

    A .docx file in *folder_path* counts as a hit when either
    * the share of uploaded (whitespace-stripped, non-blank) lines contained
      verbatim in the file's normalized text is at least *match_line_threshold*, or
    * the uploaded text mentions the file's base name (case-insensitive).
    Each hit is reported as "filename (ratio%)".
    """
    matched = []
    # Normalize uploaded lines: drop blank lines and strip all whitespace.
    upload_lines = [re.sub(r'\s+', '', line) for line in upload_text.splitlines() if line.strip()]
    total_lines = len(upload_lines)
    # Lower-cased copy used for the file-name mention check.
    upload_text_lower = upload_text.lower()
    for filename in os.listdir(folder_path):
        if filename.lower().endswith(".docx"):
            file_path = os.path.join(folder_path, filename)
            try:
                doc = Document(file_path)
                content = "\n".join([para.text for para in doc.paragraphs if para.text.strip() != ""])
                content_norm = re.sub(r'\s+', '', content)
                match_count = sum(1 for line in upload_lines if line in content_norm)
                ratio = match_count / total_lines if total_lines > 0 else 0
                logger.info("文件 %s 匹配行比例：%.2f%%", filename, ratio * 100)
                # A direct mention of the file name (without extension) also counts as a hit.
                file_basename = os.path.splitext(filename)[0].lower()
                if ratio >= match_line_threshold or file_basename in upload_text_lower:
                    # Bug fix: report the actual file name instead of the
                    # hard-coded "(unknown)" placeholder, so the "matched
                    # files" report column is actually informative.
                    matched.append(f"{filename} ({ratio:.2%})")
            except Exception as e:
                logger.error("读取文件 %s 失败：%s", filename, e)
    return "<br/>".join(matched)

#########################################
# Text preprocessing
#########################################
def preprocess_text(text):
    """Strip leading/trailing whitespace from *text* and log the resulting length."""
    cleaned = text.strip()
    logger.info("预处理后的文本长度：%d", len(cleaned))
    return cleaned

#########################################
# Split the text into lines and tokenize (newline info preserved)
#########################################
def tokenize_text(text):
    """Tokenize *text* with jieba, inserting a "\\n" token between lines.

    Multi-line input is split on real newlines; single-line input is
    split on Chinese sentence terminators (。！？) instead, keeping each
    terminator attached to its sentence.
    """
    text = preprocess_text(text)
    if "\n" in text:
        lines = text.splitlines()
    else:
        # re.split with a capturing group leaves the terminators at the
        # odd indices; re-attach each one to the sentence before it.
        pieces = re.split(r'(。|！|？)', text)
        lines = []
        for idx in range(0, len(pieces), 2):
            sentence = pieces[idx]
            if idx + 1 < len(pieces):
                sentence += pieces[idx + 1]
            lines.append(sentence)
    logger.info("拆分出 %d 行", len(lines))

    tokens = []
    last = len(lines) - 1
    for idx, line in enumerate(lines):
        tokens.extend(jieba.cut(line))
        if idx < last:
            # Preserve the line boundary as an explicit token.
            tokens.append("\n")
    logger.info("总共生成 token 数：%d", len(tokens))
    return tokens

def join_tokens(tokens):
    """Join tokens back into HTML text, rendering "\\n" tokens as ``<br/>``."""
    result = ''.join("<br/>" if tok == "\n" else tok for tok in tokens)
    logger.info("拼接后的文本长度：%d", len(result))
    return result

#########################################
# Line-level diff (difflib + BERT)
#########################################
def process_line_diff(upload_line, lib_line, semantic_threshold):
    """Return *upload_line* as HTML with matched/equivalent parts highlighted.

    Token runs equal to the library line are wrapped in red spans;
    replaced runs whose BERT similarity reaches *semantic_threshold* are
    wrapped in orange spans; everything else is left unmarked.
    """
    norm_upload = re.sub(r'\s+', '', upload_line)
    norm_lib = re.sub(r'\s+', '', lib_line)
    # Whole-line shortcut: a sufficiently long line contained verbatim in
    # the library line is highlighted in full.
    if norm_upload and len(norm_upload) >= 10 and norm_upload in norm_lib:
        return f'<span style="color:red;">{upload_line}</span>'

    tokens_upload = list(jieba.cut(upload_line))
    tokens_lib = list(jieba.cut(lib_line))
    logger.debug("处理行差异：上传分词：%s", tokens_upload)
    logger.debug("处理行差异：标准分词：%s", tokens_lib)

    pieces = []

    def emit_colored(token_run, color):
        # Wrap each non-blank token of the run in a colored span;
        # whitespace tokens pass through untouched.
        for tok in token_run:
            if tok.strip():
                pieces.append(f'<span style="color:{color};">{tok}</span>')
            else:
                pieces.append(tok)

    matcher = difflib.SequenceMatcher(None, tokens_upload, tokens_lib)
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            emit_colored(tokens_upload[i1:i2], "red")
        elif tag == 'replace':
            block_upload = ''.join(tokens_upload[i1:i2]).replace("\n", " ")
            block_lib = ''.join(tokens_lib[j1:j2]).replace("\n", " ")
            sim = compute_bert_similarity(block_upload, block_lib)
            logger.info("替换块语义相似度：%f, 上传块：'%s', 标准块：'%s'", sim, block_upload, block_lib)
            if sim >= semantic_threshold:
                emit_colored(tokens_upload[i1:i2], "orange")
            else:
                pieces.extend(tokens_upload[i1:i2])
        else:
            # 'delete' and 'insert': keep the uploaded tokens unhighlighted
            # (for 'insert' the upload slice is empty).
            pieces.extend(tokens_upload[i1:i2])
    return ''.join(pieces)

def get_diff_html(upload_text, lib_text, semantic_threshold):
    """Build the highlighted HTML for *upload_text* against *lib_text*.

    Returns ``(html, similarity)`` where *similarity* is the plain
    difflib ratio of the two raw texts.
    """
    norm_db_text = re.sub(r'\s+', '', lib_text)

    def split_text_to_lines(text):
        # Mirror tokenize_text's splitting: real newlines if present,
        # otherwise Chinese sentence terminators (kept with the sentence).
        text = preprocess_text(text)
        if "\n" in text:
            return text.splitlines()
        pieces = re.split(r'(。|！|？)', text)
        out = []
        for idx in range(0, len(pieces), 2):
            sentence = pieces[idx]
            if idx + 1 < len(pieces):
                sentence += pieces[idx + 1]
            out.append(sentence)
        return out

    upload_lines = split_text_to_lines(upload_text)
    lib_lines = split_text_to_lines(lib_text)
    logger.info("上传文本共拆分为 %d 行，标准文本共拆分为 %d 行", len(upload_lines), len(lib_lines))

    highlighted_lines = []
    for ul, ll in zip_longest(upload_lines, lib_lines, fillvalue=""):
        norm_ul = re.sub(r'\s+', '', ul)
        # Whole-line shortcut first; otherwise fall back to token-level diff.
        if norm_ul and len(norm_ul) >= 10 and norm_ul in norm_db_text:
            highlighted_lines.append(f'<span style="color:red;">{ul}</span>')
        else:
            highlighted_lines.append(process_line_diff(ul, ll, semantic_threshold))

    highlighted_html = "<br/>".join(highlighted_lines)
    # Collapse runs of consecutive <br/> tags into a single one.
    highlighted_html = re.sub(r'(<br\s*/?>\s*)+', '<br/>', highlighted_html)
    overall_similarity = difflib.SequenceMatcher(None, upload_text, lib_text).ratio()
    return highlighted_html, overall_similarity

#########################################
# Additional similarity metrics (n-gram, edit distance, TF-IDF cosine)
#########################################
def build_ngrams(tokens, n):
    """Return all contiguous n-grams of *tokens*, each joined into one string."""
    window_count = len(tokens) - n + 1
    return [''.join(tokens[start:start + n]) for start in range(window_count)]

def jaccard_similarity(set1, set2):
    """Jaccard index of two sets: |intersection| / |union|, 0 when both are empty."""
    union = set1 | set2
    return len(set1 & set2) / len(union) if union else 0

def compute_ngram_similarity(text1, text2, n):
    """Jaccard similarity between the jieba-token n-gram sets of the two texts."""
    grams1 = set(build_ngrams(list(jieba.cut(text1)), n))
    grams2 = set(build_ngrams(list(jieba.cut(text2)), n))
    sim = jaccard_similarity(grams1, grams2)
    logger.info("n-gram (n=%d) Jaccard相似度：%f", n, sim)
    return sim

def compute_edit_distance_similarity(text1, text2):
    """Similarity ratio of the two raw texts as computed by difflib.SequenceMatcher."""
    matcher = difflib.SequenceMatcher(None, text1, text2)
    sim = matcher.ratio()
    logger.info("编辑距离相似度：%f", sim)
    return sim

def compute_cosine_similarity(text1, text2, token_pattern):
    """TF-IDF cosine similarity between the two texts (scikit-learn backed)."""
    tfidf_matrix = TfidfVectorizer(token_pattern=token_pattern).fit_transform([text1, text2])
    cosine = cosine_sim(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]
    logger.info("余弦相似度：%f", cosine)
    return cosine

#########################################
# Detect long runs of repeated Chinese characters
#########################################
def detect_continuous_repetition(upload_text, db_text, min_length):
    """Find runs of at least *min_length* consecutive Chinese characters in
    *upload_text* that also occur verbatim in *db_text*.

    Returns a list of ``(start, end, segment)`` tuples (end exclusive).
    """
    # CJK Unified Ideographs range; runs shorter than min_length are ignored.
    run_pattern = re.compile(r'[\u4e00-\u9fff]{%d,}' % min_length)
    segments = [
        (m.start(), m.end(), m.group())
        for m in run_pattern.finditer(upload_text)
        if m.group() in db_text
    ]
    logger.info("检测到 %d 个连续重复汉字片段", len(segments))
    return segments

#########################################
# Build the three-column duplicate-check report HTML
# (left: marked upload; middle: matched files; right: library file list)
#########################################
def get_triple_column_report(upload_text, db_text, db_display, folder_path,
                             semantic_threshold=config["semantic_threshold"],
                             ngram_n=config["ngram_n"], repetition_min_length=config["repetition_min_length"],
                             tfidf_token_pattern=config["tfidf_token_pattern"],
                             match_ratio=config["match_line_threshold"]):
    """Generate the complete standalone HTML duplicate-check report.

    Parameters:
        upload_text: the uploaded text being checked.
        db_text: concatenated text of every library document.
        db_display: <br/>-joined list of library file names (right column).
        folder_path: folder containing the library .docx files.
        semantic_threshold: BERT similarity threshold for highlighting replaced blocks.
        ngram_n: n for the n-gram Jaccard metric.
        repetition_min_length: minimum run of Chinese characters counted as repetition.
        tfidf_token_pattern: token regex for the TF-IDF cosine metric.
        match_ratio: matched-line ratio threshold passed to get_matched_files
            (note: despite the name, it defaults to config["match_line_threshold"]).

    Returns:
        A full HTML document as a string.
    """
    diff_html, diff_similarity = get_diff_html(upload_text, db_text, semantic_threshold)
    logger.info("difflib+BERT整体相似度：%.2f%%", diff_similarity * 100)

    # Secondary similarity metrics.
    ngram_sim = compute_ngram_similarity(upload_text, db_text, ngram_n)
    edit_sim = compute_edit_distance_similarity(upload_text, db_text)
    cosine_sim_value = compute_cosine_similarity(upload_text, db_text, tfidf_token_pattern)

    # Continuous-repetition detection and its share of the whole upload.
    rep_segments = detect_continuous_repetition(upload_text, db_text, repetition_min_length)
    rep_total_length = sum(end - start for start, end, _ in rep_segments)
    overall_length = len(upload_text)
    repetition_ratio = rep_total_length / overall_length if overall_length > 0 else 0
    if rep_segments:
        rep_html = "<ul>"
        for start, end, seg in rep_segments:
            rep_html += f"<li>位置 {start}-{end}: {seg}</li>"
        rep_html += "</ul>"
    else:
        rep_html = f"无连续重复内容（≥{repetition_min_length}个汉字）"

    matched_files = get_matched_files(folder_path, upload_text, match_ratio)

    # Bug fix below: the "continuous repetition" section heading now uses the
    # repetition_min_length parameter actually used for detection, instead of
    # re-reading config["repetition_min_length"], so the report never reports
    # a different threshold than the one applied.
    html = f"""<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>标准查重报告</title>
    <style>
        body {{
            font-family: "Microsoft YaHei", sans-serif;
            line-height: 1.6;
            margin: 20px;
        }}
        .container {{
            display: flex;
            justify-content: space-between;
        }}
        .column {{
            width: 32%;
            border: 1px solid #ccc;
            box-sizing: border-box;
            margin-bottom: 20px;
        }}
        .header {{
            background: #f0f0f0;
            padding: 5px;
            text-align: center;
            font-weight: bold;
        }}
        .content {{
            max-height: 300px !important;
            overflow-y: auto;
            white-space: pre-wrap;
            padding: 5px;
            word-break: break-all;
        }}
        .section {{
            margin-top: 20px;
        }}
    </style>
</head>
<body>
    <h2>标准查重详细报告</h2>
    <div class="container">
        <div class="column">
            <div class="header">上传文件内容标记</div>
            <div class="content">{diff_html}</div>
        </div>
        <div class="column">
            <div class="header">命中文件</div>
            <div class="content">{matched_files if matched_files else "无命中文件"}</div>
        </div>
        <div class="column">
            <div class="header">标准文件列表</div>
            <div class="content">{db_display}</div>
        </div>
    </div>
    <div class="section">
        <h3>相似度指标</h3>
        <ul>
            <li>difflib+BERT整体相似度：{diff_similarity:.2%}</li>
            <li>n-gram（n={ngram_n}）Jaccard相似度：{ngram_sim:.2%}</li>
            <li>编辑距离相似度：{edit_sim:.2%}</li>
            <li>TF-IDF余弦相似度：{cosine_sim_value:.2%}</li>
        </ul>
    </div>
    <div class="section">
        <h3>连续重复内容检测（连续重复≥{repetition_min_length}个汉字）</h3>
        {rep_html}
        <p>连续重复内容总长度占比：{repetition_ratio:.2%}</p>
    </div>
</body>
</html>
"""
    return html

#########################################
# Main - run the duplicate-check and generate the report
#########################################
if __name__ == "__main__":
    # Simulated uploaded text (could also be read from a docx or plain-text file)
    upload_text = """中华人民共和国纺织总会发布
我是一个小作家，写作本领强，我要把那小本子，写得满当当，1234。

GB 2.2-2007.docx
减少了高温作业分级和高温作业场所气象条件的卫生学评价标准

GB 1.2-2002.docx
e) 联络和合作
与其他标准化技术委员会或有关机构的联络和合作。
f) 与有关文件的协调

FZ_T 24008-1998.docx
4.5.1.2操作方法：
a) 精纺织品可拉齐边纱后修正布边； 精纺织品可拉齐边纱后修正布边；
"""
    # Read the content and file names of every docx file in the library folder
    folder_path = "library"
    db_text, db_display = load_db_text_and_filenames(folder_path)
    
    html_report = get_triple_column_report(upload_text, db_text, db_display, folder_path,
                                           semantic_threshold=config["semantic_threshold"],
                                           ngram_n=config["ngram_n"],
                                           repetition_min_length=config["repetition_min_length"],
                                           tfidf_token_pattern=config["tfidf_token_pattern"])
    with open("diff_report.html", "w", encoding="utf-8") as f:
        f.write(html_report)
    logger.info("HTML标准查重报告已保存到 diff_report.html")