import zipfile
from collections import defaultdict
from lxml import etree
import json
import re
from openai import OpenAI
import os

# SECURITY NOTE(review): an API key was hard-coded here and committed to source.
# Prefer the environment; the literal is kept only as a backward-compatible
# fallback and should be rotated and removed from version control.
OPENAI_API_KEY = os.getenv('DASHSCOPE_API_KEY', 'sk-6bc589928d1e4b7289a8c85f5fe72a23')
OPENAI_API_BASE = 'https://dashscope.aliyuncs.com/compatible-mode/v1'

# Module-level client reused by extract_three_part_with_llm().
client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)


def parse_docx(docx_path, result_path):
    """Extract "Work Item" test cases from a .docx requirements document.

    Each Work Item content control is split into three labelled sections
    (initial state / trigger condition / action) with a one-shot regex;
    when the regex cannot produce all three parts, an LLM fallback
    (extract_three_part_with_llm) is used.  Every parsed case is written
    to *result_path* as one JSON object per line (JSONL), incrementally,
    so partial results survive a mid-run crash.

    Args:
        docx_path: path to the input .docx file.
        result_path: path of the JSONL output file (overwritten).

    Returns:
        dict mapping case id -> case dict, plus a "categories" entry
        naming the source document.
    """
    # A .docx file is a zip archive; the document body is word/document.xml.
    with zipfile.ZipFile(docx_path) as docx:
        xml_content = docx.read("word/document.xml")

    tree = etree.fromstring(xml_content)
    ns = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}

    # Keep only the structured-document-tags whose alias is "Work Item".
    work_item_elements = []
    for sdt in tree.xpath('//w:sdt', namespaces=ns):
        alias = sdt.xpath('.//w:alias/@w:val', namespaces=ns)
        if alias and alias[0] == "Work Item":
            work_item_elements.append(sdt)

    work_items = defaultdict(dict)
    work_items["categories"] = [{"name": os.path.basename(docx_path), "tests": []}]

    # One-shot pattern matching the three labelled sections; accepts both
    # full-width (：) and ASCII (:) colons after each Chinese label.
    pattern = re.compile(
        r'初始状态[:：]\s*(.*?)\s*触发条件[:：]\s*(.*?)\s*执行动作[:：]\s*(.*)',
        re.DOTALL
    )
    print(f"Total work items found: {len(work_item_elements)}")

    # BUG FIX: the original reopened result_path in 'w' mode inside the loop
    # and rewrote the entire accumulated file on every iteration (O(n^2)
    # writes).  Open once and append each case as it is parsed, preserving
    # the incremental-save behavior with a single pass.
    with open(result_path, 'w', encoding='utf-8') as f:
        for sdt in work_item_elements:
            print("Processing a work item...")
            # Concatenate all text runs inside this content control.
            texts = sdt.xpath('.//w:t/text()', namespaces=ns)
            content = "\n".join(texts).strip()

            lines = content.splitlines()
            # Robustness: the id is expected on line 1 and the title on
            # line 3; skip malformed items instead of raising IndexError.
            if len(lines) < 4:
                print("Skipping malformed work item (too few lines)")
                continue
            item_id = lines[1]
            item_title = lines[3]

            item_content = "".join(lines[4:]).strip()
            # Strip the "[已发布]" ("published") status marker.
            item_content = item_content.replace('[已发布]', '')

            # Try the one-shot regex first; fall back to the LLM as needed.
            m = pattern.search(item_content)
            if m:
                initial_state = m.group(1).strip()
                trigger_condition = m.group(2).strip()
                action = m.group(3).strip()

                # If any extracted section is empty, the regex split was not
                # usable — let the LLM re-extract, keeping regex values as
                # defaults for any part the LLM omits.
                if not initial_state or not trigger_condition or not action:
                    extracted_parts = extract_three_part_with_llm(item_content)
                    initial_state = extracted_parts.get('initial_state', initial_state).strip()
                    trigger_condition = extracted_parts.get('trigger_condition', trigger_condition).strip()
                    action = extracted_parts.get('action', action).strip()
            else:
                # Regex failed entirely — full LLM extraction.
                extracted_parts = extract_three_part_with_llm(item_content)
                initial_state = extracted_parts.get('initial_state', '').strip()
                trigger_condition = extracted_parts.get('trigger_condition', '').strip()
                action = extracted_parts.get('action', '').strip()

            case = {
                "case_id": item_id,
                "case_name": item_title,
                "initial_state": initial_state,
                "trigger_condition": trigger_condition,
                "action": action,
                "original_text": item_content
            }
            work_items[item_id] = case
            f.write(json.dumps(case, ensure_ascii=False) + '\n')
    return work_items


def extract_three_part_with_llm(content):
    """Ask the LLM to split *content* into the three test-case sections.

    Sends the text to the chat model and parses the fenced JSON in its
    reply.  Returns a dict carrying the three parts under both Chinese
    keys (初始状态/触发条件/执行动作) and English keys (initial_state/
    trigger_condition/action).  On any failure (API error, unparseable
    reply) returns the English keys mapped to empty strings.
    """
    # Prompt the model for a fenced-JSON reply so it can be parsed reliably.
    # (The original prompt left the example ```json fence unterminated.)
    prompt = f"""请将以下文本划分为'初始状态'，'触发条件'，'执行动作'三部分: {content}。
返回格式为：
```json
{{
    "初始状态": "...",
    "触发条件": "...",
    "执行动作": "..."
}}
```
    """

    try:
        response = client.chat.completions.create(
            model="qwen3-32b",
            messages=[{"role": "user", "content": prompt}],
            extra_body={"enable_thinking": False},  # disable chain-of-thought output
            temperature=0  # deterministic extraction
        )
        model_output = response.choices[0].message.content

        # Prefer a ```json fenced block; fall back to any fenced block.
        m = re.search(r'```json(.*?)```', model_output, re.DOTALL)
        if m is None:
            m = re.search(r'```(.*?)```', model_output, re.DOTALL)

        if m:
            # BUG FIX: the original assigned json_str only in the ```json
            # branch, so the plain-``` fallback raised NameError on parse
            # (silently swallowed by the broad except below).
            json_str = m.group(1).strip()
            parsed = json.loads(json_str)
            return {
                '初始状态': parsed.get('初始状态', '').strip(),
                '触发条件': parsed.get('触发条件', '').strip(),
                '执行动作': parsed.get('执行动作', '').strip(),
                'initial_state': parsed.get('初始状态', '').strip(),
                'trigger_condition': parsed.get('触发条件', '').strip(),
                'action': parsed.get('执行动作', '').strip()
            }
        else:
            print(f"大模型解析失败: 无法解析JSON")
            return {'initial_state': '', 'trigger_condition': '', 'action': ''}
    except Exception as e:
        # Best-effort extraction: never propagate, degrade to empty fields.
        print(f"大模型解析失败: {e}")
        return {'initial_state': '', 'trigger_condition': '', 'action': ''}
    
    
if __name__ == "__main__":
    # Guard the script entry point so importing this module does not
    # trigger the (slow, network-dependent) extraction as a side effect.
    parse_docx('/Users/yangchen/Desktop/HIL测试用例生成/sys2_雨刮洗涤系统功能需求规范V2.docx', '/Users/yangchen/Desktop/car_ut/preprocessed_data/all_cases.jsonl')