import os
import requests
import json
import re
import threading
import queue
import logging

try:
    import pdfplumber
except ImportError:
    print("请先安装pdfplumber库：pip install pdfplumber")
    exit(1)

from docx import Document

# Logging configuration: log to both a file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        # NOTE(review): relative path — the "0721" directory must already
        # exist in the working directory, or FileHandler raises at startup.
        logging.FileHandler("0721/pre-train-log.log", encoding="utf-8"),
        logging.StreamHandler()
    ]
)

# Model service endpoints (OpenAI-compatible chat-completions APIs).
# One worker thread is started per entry; extra services can be re-enabled
# by uncommenting the entries below.
model_services = [
    {
        "api_url": "http://18.0.31.1:9981/v1/chat/completions",
        "model": "ds671"
    }
    # ,
    # {
    #     "api_url": "http://18.0.31.2:9981/v1/chat/completions",
    #     "model": "/home/llm/qwq32"
    # }

]

# Shared work queue of (file_path, chunk_index, chunk_text) tasks, plus a lock
# serializing read-modify-write access to the shared results file.
task_queue = queue.Queue()
result_lock = threading.Lock()

# Root directory to scan for source documents.
target_dir = "/Users/aresen/Downloads/00客服项目支撑/3知识库相关/集团制度/集团制度文档/1"

# Fixed system prompt: instructs the model to emit source clauses verbatim as
# JSONL records of the form {"text": "[标题] ... [条款] ：..."}.
system_prompt = (
    "你是企业级规章制度专家，擅长识别各种钢铁企业内部的规章制度、工作办法。"
    "请严格按照以下要求处理："
    "1. 只输出原文条款内容，不要编造。"
    "2. 每个条款必须输出为一条独立的JSON对象，整体为jsonl格式，每行一个对象。"
    "3. 每条的格式必须严格为：{\"text\": \"[标题] 文章标题-主要部分/分类/小标题 [条款] ：具体条款内容原文\"}。"
    "4. [标题]必须为当前所属文章标题（如有多个附件，则为当前附件名称），不能省略或替换，不能输出文件路径。"
    "5. [条款]必须为原文条款编号及内容，不能省略或替换。"
    "6. 如果有主要部分/分类/小标题，也要一并输出在[标题]后面。"
    "7. [标题]和[条款]这两个标识必须原样输出，不能丢失、不能改写。"
    "8. 严禁输出如下内容：'文章标题-主要部分/分类/小标题'、'具体条款内容原文'、'[标题] [条款编号] ：具体内容'、任何文件路径、任何占位符或模板内容。"
    "9. 只输出jsonl格式内容，不要输出任何解释、说明、前后缀、代码块标记等。"
    "10. 如果有附加解释，放入具体条款内容中。"
)

# Files that could not be read (empty / garbled / unparseable).
pre_train_empty_files = []
results = {}  # NOTE(review): appears unused in this file — confirm before removing.

# Rough token-budget split; applied as *character* counts via split_content.
MAX_TOKENS = 30720
MAX_COMPLETION_TOKENS = 20480  # desired maximum generation length
MAX_INPUT_TOKENS = MAX_TOKENS - MAX_COMPLETION_TOKENS

def split_content(content, max_length):
    """Split *content* into consecutive chunks of at most *max_length* chars.

    Splitting is purely character-based; a real tokenizer would give a more
    precise token budget.
    """
    chunks = []
    start = 0
    while start < len(content):
        chunks.append(content[start:start + max_length])
        start += max_length
    return chunks

def read_pdf(file_path):
    """Extract the concatenated text of every page of a PDF.

    Returns "" (and prints a diagnostic) when pdfplumber cannot parse the file.
    """
    try:
        with pdfplumber.open(file_path) as pdf:
            page_texts = [page.extract_text() or "" for page in pdf.pages]
        return "".join(page_texts).strip()
    except Exception as e:
        print(f"无法解析PDF: {file_path}, 错误: {e}")
        return ""

def is_garbled(text):
    """Heuristic: text is garbled when 'CIDnnnn' extraction artifacts dominate.

    Each match is assumed to span roughly 6 characters ("CID" plus digits);
    the text counts as garbled when those artifacts would cover more than
    half of it.
    """
    if not text:
        return False
    artifact_count = len(re.findall(r'CID\d+', text))
    return artifact_count * 6 > len(text) * 0.5

def read_docx(file_path):
    """Read all paragraph text from a .docx file.

    Returns "" when the file is unreadable or its content looks garbled
    (see is_garbled); a diagnostic is printed in both cases.
    """
    try:
        document = Document(file_path)
        joined = "\n".join(para.text for para in document.paragraphs)
        if is_garbled(joined):
            print(f"文件内容为乱码: {file_path}")
            return ""
        return joined.strip()
    except Exception as e:
        print(f"无法解析DOCX: {file_path}, 错误: {e}")
        return ""

def read_text_file(file_path):
    """Read a UTF-8 text file and return its stripped contents ("" on error)."""
    try:
        with open(file_path, "r", encoding="utf-8") as fh:
            raw = fh.read()
        return raw.strip()
    except Exception as e:
        print(f"无法读取文本文件: {file_path}, 错误: {e}")
        return ""

def fix_json_array(s):
    """Patch common formatting problems in a JSON-array-like string.

    Removes trailing commas before ']' / '}', inserts a missing comma between
    adjacent objects, and wraps the whole thing in '[' ... ']' when needed.
    """
    patched = re.sub(r',\s*([\]}])', r'\1', s)       # drop trailing commas
    patched = re.sub(r'(\{[^\}]*\})\s*(\{)', r'\1,\2', patched)  # add missing commas
    if not patched.strip().startswith('['):
        patched = '[' + patched
    if not patched.strip().endswith(']'):
        patched += ']'
    return patched

def extract_json_from_content(content, file_path=None):
    """Best-effort extraction of a list of JSON objects from an LLM reply.

    Tries, in order:
      1. a fenced ```json ... ``` code block,
      2. the first inline JSON array ("[ { ... } ]") in the body,
      3. line-by-line JSONL parsing (one object per non-empty line).

    On total failure the raw reply is appended to 0721/llm_bad_response.jsonl
    (when *file_path* is given) and [] is returned.

    Fixes vs. the previous version: the inline-array regex was double-escaped
    inside a raw string (it could only ever match a literal backslash, so that
    branch never fired); the bad-response log was written with a literal
    two-character "\\n" instead of a newline; the final error print referenced
    a possibly-unbound exception variable; and ~30 lines of dead code after
    the return were removed.
    """
    def _log_bad_response():
        # Keep the unparseable reply for offline inspection / re-prompting.
        if file_path:
            with open("0721/llm_bad_response.jsonl", "a", encoding="utf-8") as f:
                f.write(json.dumps({"file": file_path, "content": content},
                                   ensure_ascii=False) + "\n")

    # 1. Prefer a fenced ```json ... ``` block.
    match = re.search(r"```json\s*(.*?)```", content, re.DOTALL)
    if match:
        json_str = match.group(1)
        try:
            return json.loads(fix_json_array(json_str))
        except Exception as e:
            print("JSON代码块解析失败，内容如下：\n", json_str)
            print("错误：", e)

    # 2. First inline JSON array in the body.
    match = re.search(r'(\[\s*{.*?}\s*\])', content, re.DOTALL)
    if match:
        json_str = match.group(1)
        try:
            return json.loads(fix_json_array(json_str))
        except Exception as e:
            print("正文中JSON数组解析失败，内容如下：\n", json_str)
            print("错误：", e)
            _log_bad_response()

    # 3. Treat the reply as JSONL: one JSON object per non-empty line.
    jsonl_objs = []
    for line in (ln.strip() for ln in content.strip().splitlines()):
        if not line:
            continue
        if line.startswith("{") and line.endswith("}"):
            try:
                jsonl_objs.append(json.loads(line))
            except Exception as e2:
                print(f"jsonl单行解析失败: {line}\n错误: {e2}")
        else:
            # Salvage an embedded {...} object from a noisy line if possible.
            m = re.search(r'({.*})', line)
            if m:
                try:
                    jsonl_objs.append(json.loads(m.group(1)))
                except Exception as e2:
                    print(f"jsonl正则提取后解析失败: {line}\n错误: {e2}")
            else:
                print(f"跳过非JSON行: {line}")
    if jsonl_objs:
        return jsonl_objs

    print("直接解析失败，原始content如下：\n", content)
    _log_bad_response()
    return []

def clean_and_standardize(items, file_title=None, chapter_title=None):
    """Filter model output down to well-formed {"text": ...} clause records.

    Drops non-dict items, empty texts, and texts that merely echo the
    prompt's placeholder/template phrases.  Entries missing the required
    [标题]/[条款] markers are rebuilt from *file_title* and *chapter_title*
    when both are supplied, otherwise discarded.
    """
    # Phrases that indicate the model echoed the prompt template instead of
    # producing real clause content.
    placeholder_phrases = (
        "文章标题-主要部分/分类/小标题",
        "具体条款内容原文",
        "[标题] [条款编号] ：具体内容",
        "文件路径",
        ".docx",
        ".pdf",
        ".txt",
        "：具体内容",
        "：具体条款内容原文",
    )
    kept = []
    for entry in items:
        if not isinstance(entry, dict):
            continue
        text = entry.get("text", "").strip()
        if not text:
            continue
        if any(phrase in text for phrase in placeholder_phrases):
            continue
        if "[标题]" not in text or "[条款]" not in text:
            # Rebuild the record when we know the title/chapter; otherwise drop.
            if not (file_title and chapter_title):
                continue
            clause_match = re.search(r"(第[一二三四五六七八九十百千万]+条)", text)
            clause_no = clause_match.group(1) if clause_match else ""
            text = f"[标题] {file_title}-{chapter_title} [条款] {clause_no} ：{text}"
        if "[条款]" not in text or "：" not in text:
            continue
        kept.append({"text": text})
    return kept

def append_to_json_list(filename, item):
    """Append *item* to the JSON array stored in *filename*.

    Creates the file if it does not exist; a corrupt/unparseable file is
    silently replaced with a fresh list.
    """
    existing = []
    if os.path.exists(filename):
        with open(filename, "r", encoding="utf-8") as fh:
            try:
                existing = json.load(fh)
            except Exception:
                existing = []
    existing.append(item)
    with open(filename, "w", encoding="utf-8") as fh:
        json.dump(existing, fh, ensure_ascii=False, indent=2)

# 1. Walk the target directory and enqueue every readable file (split into
#    chunks) as (file_path, chunk_index, chunk_text) tasks.
for root, dirs, files in os.walk(target_dir):
    for file in files:
        # Skip hidden files (e.g. .DS_Store).
        if file.startswith('.'):
            continue
        file_path = os.path.join(root, file)
        ext = os.path.splitext(file)[-1].lower()
        content = ""
        if ext == ".pdf":
            content = read_pdf(file_path)
        elif ext == ".docx":
            content = read_docx(file_path)
        else:
            # Everything else is treated as UTF-8 plain text.
            content = read_text_file(file_path)
        if not content:
            # Unreadable / empty / garbled file: record it in both bookkeeping
            # files and move on.  NOTE(review): pre_train_empty_files.json is
            # rewritten wholesale on every failure — fine for small runs.
            pre_train_empty_files.append(file_path)
            with open("0721/pre_train_empty_files.json", "w", encoding="utf-8") as ef:
                json.dump(pre_train_empty_files, ef, ensure_ascii=False, indent=2)
            append_to_json_list("0721/unusable_files.json", file_path)
            logging.warning(f"不可用文件: {file_path}")
            continue
        # Character-based chunking as a rough proxy for the input token budget.
        content_chunks = split_content(content, MAX_INPUT_TOKENS)
        for idx, chunk in enumerate(content_chunks):
            task_queue.put((file_path, idx, chunk))
            logging.info(f"任务入队: {file_path} 第{idx+1}段")

def worker(model_service):
    """Drain the shared task queue, sending each chunk to *model_service*.

    Each successful reply is parsed, cleaned, and merged (under result_lock)
    into 0721/pre-train-model-results.json, keyed by "<file_path>__part<n>".
    Failures are logged and the task is still marked done.
    """
    logging.info(f"线程启动: {model_service['model']}")
    # empty() + get_nowait() is racy across threads, but queue.Empty is caught
    # below, so a lost race simply ends this worker's loop early.
    while not task_queue.empty():
        try:
            file_path, idx, chunk = task_queue.get_nowait()
        except queue.Empty:
            break
        # OpenAI-compatible chat-completions request body.
        payload = {
            "model": model_service["model"],
            "temperature": 0.5,
            "max_tokens": MAX_COMPLETION_TOKENS,
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": chunk}
            ],
            "stream": False
        }
        logging.info(f"[{model_service['model']}] 开始请求: {file_path} 第{idx+1}段")
        try:
            # NOTE(review): requests' timeout is in *seconds*, so 600000 is
            # ~7 days — probably meant 600 (10 minutes); confirm intent.
            response = requests.post(model_service["api_url"], json=payload, timeout=600000)
            response.raise_for_status()
            result = response.json()
            content = result["choices"][0]["message"]["content"]
            extracted = extract_json_from_content(content, file_path)
            # Chapter title is hard-coded; file_path doubles as the file title.
            cleaned = clean_and_standardize(extracted, file_path, "集团制度文档") # assume this chapter title for all files
            result_key = f"{file_path}__part{idx+1}"
            # Read-modify-write of the shared results file must be atomic
            # across worker threads.
            with result_lock:
                if os.path.exists("0721/pre-train-model-results.json"):
                    with open("0721/pre-train-model-results.json", "r", encoding="utf-8") as rf:
                        try:
                            all_results = json.load(rf)
                        except Exception:
                            # Corrupt results file: start over rather than crash.
                            all_results = {}
                else:
                    all_results = {}
                all_results[result_key] = cleaned
                with open("0721/pre-train-model-results.json", "w", encoding="utf-8") as wf:
                    json.dump(all_results, wf, ensure_ascii=False, indent=2)
            logging.info(f"[{model_service['model']}] 处理成功: {file_path} 第{idx+1}段")
        except Exception as e:
            logging.error(f"[{model_service['model']}] 处理失败: {file_path} 第{idx+1}段, 错误: {e}")
        finally:
            task_queue.task_done()
    logging.info(f"线程结束: {model_service['model']}")

# Launch one worker thread per configured model service, then wait for all
# of them to finish draining the queue.
threads = [threading.Thread(target=worker, args=(svc,)) for svc in model_services]
for t in threads:
    t.start()

for t in threads:
    t.join()

# Optional: flatten the aggregated per-chunk results into a local JSONL file.
import json

# Load every chunk's cleaned records, keyed by "<file_path>__part<n>".
with open("0721/pre-train-model-results.json", "r", encoding="utf-8") as src:
    all_results = json.load(src)

# NOTE: despite the .json extension, the output is JSON Lines — one
# {"text": ...} record per line.
with open("0721/pre-train-texts.json", "w", encoding="utf-8") as sink:
    for value in all_results.values():
        # A value may be a list of records or a single record dict.
        records = value if isinstance(value, list) else [value]
        for record in records:
            if isinstance(record, dict) and record.get("text", "").strip():
                sink.write(json.dumps(record, ensure_ascii=False) + "\n")
