import requests
import os
import json
import csv
import re
# from docx import Document
from PIL import Image
import io
import pytesseract
import base64
import pdfplumber
from itertools import groupby

# def parse_rich_word(file_path):
#     try:
#         if file_path.endswith('.pdf'):
#             with pdfplumber.open(file_path) as pdf:
#                 text = "\n".join([page.extract_text() for page in pdf.pages])
#         elif file_path.endswith('.docx'):
#             doc = Document(file_path)
#             text = "\n".join([para.text for para in doc.paragraphs])
#         else:
#             raise ValueError("Unsupported file format")
#         # 关键修复1：清理特殊字符
#         cleaned_text = text.replace("\uf06c", "◆ ").replace('"', "'")  # 避免双引号冲突
#         # 关键修复2：结构化段落
#         sections = [sec.strip() for sec in cleaned_text.split("\n\n") if sec.strip()]
#         # 关键修复3：确保ASCII输出转义
#         return {
#             "content": cleaned_text,
#             "sections": sections
#         }
#
#     except Exception as e:
#         print(f"文件处理失败：{str(e)}")
#         return {"error": str(e)}

# 1. Parse the requirements document (PDF; .docx support currently disabled)
def parse_rich_word(file_path):
    """Parse a requirements document into structured data.

    Supports PDF files via pdfplumber, including multi-column page
    layouts, embedded tables, and OCR of embedded images (pytesseract).

    :param file_path: path to the document (currently only ``.pdf``)
    :return: dict with keys ``content`` (cleaned full text), ``sections``
             (heading-delimited text blocks), ``tables`` (list of
             row-lists) and ``images`` (OCR results with bounding boxes),
             or ``{"error": message}`` on any failure.
    """

    def pdf_column_parser(page):
        """Reassemble text from a (possibly multi-column) PDF page."""
        chars = page.chars
        if not chars:
            return ""

        # Group characters into columns by gaps along the x-axis.
        sorted_chars = sorted(chars, key=lambda x: x['x0'])
        columns = []
        current_col = []
        prev_x1 = 0

        for char in sorted_chars:
            # A horizontal gap wider than 20pt starts a new column.
            if current_col and (char['x0'] - prev_x1 > 20):
                columns.append(current_col)
                current_col = []
            current_col.append(char)
            prev_x1 = char['x1']
        if current_col:
            columns.append(current_col)

        # Rebuild text column by column, top to bottom.
        column_texts = []
        for col in columns:
            col.sort(key=lambda x: (x['top'], x['x0']))
            lines = []
            # Characters within the same ~10pt vertical band form a line;
            # col is already sorted by 'top', as groupby requires.
            for k, g in groupby(col, lambda x: x['top'] // 10):
                line = ''.join([c['text'] for c in sorted(g, key=lambda x: x['x0'])])
                lines.append(line)
            column_texts.append('\n'.join(lines))

        return '\n\n'.join(column_texts)

    def extract_pdf_tables(page):
        """Extract tables from a PDF page as lists of cleaned string rows."""
        tables = []
        for table in page.extract_tables():
            formatted_table = []
            for row in table:
                # pdfplumber yields None for empty cells — coerce to ""
                # before normalising whitespace, or .replace() crashes.
                cleaned_row = [re.sub(r'\s+', ' ', (cell or '').replace('\n', ' ')) for cell in row]
                formatted_table.append(cleaned_row)
            tables.append(formatted_table)
        return tables

    try:
        # Up-front file validation.
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")
        if os.path.getsize(file_path) == 0:
            raise ValueError("文件内容为空")
        doc_data = {
            "content": "",
            "sections": [],
            "tables": [],
            "images": []
        }

        if file_path.endswith('.pdf'):
            with pdfplumber.open(file_path) as pdf:
                full_text = []
                for page_idx, page in enumerate(pdf.pages):
                    try:
                        # Fall back to plain extraction when column
                        # detection produces nothing.
                        raw_text = pdf_column_parser(page) or page.extract_text()
                        text = str(raw_text) if raw_text else ""
                        full_text.append(text)

                        tables = extract_pdf_tables(page)
                        if tables:
                            doc_data["tables"].extend(tables)
                    except Exception as page_error:
                        print(f"第 {page_idx + 1} 页处理失败：{str(page_error)}")
                        full_text.append("")

                    # OCR embedded images — best effort, failures are
                    # logged and skipped.
                    for img in page.images:
                        try:
                            img_obj = img["stream"].get_data()
                            image = Image.open(io.BytesIO(img_obj))
                            ocr_text = pytesseract.image_to_string(image, lang='chi_sim+eng')
                            # pdfplumber image dicts carry coordinates,
                            # not a ready-made "bbox" key — build one.
                            bbox = img.get('bbox') or (img['x0'], img['top'], img['x1'], img['bottom'])
                            doc_data["images"].append({
                                "bbox": bbox,
                                "text": ocr_text.strip()
                            })
                        except Exception as img_e:
                            print(f"图片处理失败：{str(img_e)}")
                            continue
                full_content = "\n".join(filter(None, full_text))  # drop empty pages
                full_content = full_content if full_content.strip() else ""  # all-blank document -> ""
        # TODO: re-enable .docx parsing (python-docx) once the
        # dependency is available again.
        else:
            raise ValueError("Unsupported file format")

        # Unified text clean-up.
        cleaned_text = re.sub(r'(?<!\n)\n(?!\n)', ' ', full_content)  # join soft line breaks
        cleaned_text = re.sub(r'\uf06c', '◆ ', cleaned_text)  # replace private-use bullet glyph
        cleaned_text = re.sub(r'\s{2,}', ' ', cleaned_text)  # collapse runs of whitespace

        # Heading-aware sectioning: Chinese numbered headings such as
        # "一、" or "（甲）" start a new section.
        sections = []
        current_section = []
        for line in cleaned_text.split('\n'):
            if re.match(r'^[一二三四五六七八九十]、', line) or re.match(r'^（[甲乙丙丁]）', line):
                if current_section:
                    sections.append('\n'.join(current_section))
                    current_section = []
            current_section.append(line.strip())
        if current_section:
            sections.append('\n'.join(current_section))

        doc_data.update({
            "content": cleaned_text,
            "sections": sections
        })
        return doc_data

    except Exception as e:
        print(f"文件处理失败：{str(e)}")
        return {"error": str(e)}

# 2. Call the DeepSeek API to generate test cases
def generate_test_cases(api_key, requirements):
    """Turn requirement text into structured test cases via DeepSeek chat.

    :param api_key: DeepSeek API key (sent as a Bearer token)
    :param requirements: parsed requirements (stringified, truncated to
                         60,000 characters before sending)
    :return: list of test-case dicts with Chinese keys, or [] on any
             failure (HTTP error, malformed JSON, missing field)
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    system_prompt = """你是一个专业软件测试工程师，请严格按照以下Excel模板结构生成测试用例：

    | 模块           | 测试点                 | 前置条件               | 测试步骤                                                                 | 预期结果                                |
    |----------------|------------------------|------------------------|--------------------------------------------------------------------------|----------------------------------------|

    ### 要求：
    0. **单一验证原则**：每条用例仅验证一个功能点，若有同类型的功能点需要拆分成多个测试用例
    1. **模块**：包含B端/C端分类（如`B端-注册和登录`，等等）
    2. **测试点**：必须包含`功能测试`（不用在测试点前面加上功能测试），可包含/`性能测试`/`兼容性测试`/`可靠性测试`/`易用性测试`/`安全测试`/`其他`分类
    3. **测试步骤**：用自然语言分步骤描述，步骤间用`<br>`分隔，尽量言语简洁，避免使用专业术语
    4. **预期结果**：用自然语言描述预期结果
    5. **覆盖范围**：要求场景覆盖无遗漏
    6. 用JSON格式返回，键名使用中文
    """

    payload = {
        "model": "deepseek-chat",
        # "model": "deepseek-reasoner",  # DeepSeek-R1 reasoning model: does not support JSON output mode
        "temperature": 0.5,
        "max_tokens": 3000,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": str(requirements)[:60000]}
        ],
        "response_format": {"type": "json_object"}
    }

    try:
        # Retry transient failures at the transport level.
        session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(max_retries=3)
        session.mount('https://', adapter)

        response = session.post(
            "https://api.deepseek.com/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=120
        )

        # Report HTTP-level failures explicitly instead of raising.
        if response.status_code != 200:
            print(f"API错误:{response.status_code} 正文:{response.text[:300]}")
            return []

        raw_data = response.json()
        # ensure_ascii=False keeps Chinese readable in the debug dump.
        print("调试数据:", json.dumps(raw_data, indent=2, ensure_ascii=False))

        content = raw_data["choices"][0]["message"]["content"]

        # Tolerate a model response that is not valid JSON.
        try:
            parsed = json.loads(content)
        except json.JSONDecodeError as json_e:
            print(f"JSON解析失败: {str(json_e)}")
            return []

        if "测试用例" not in parsed:
            print("响应结构异常: 未找到'测试用例'字段")
            return []

        # Normalise step separators. Guard the type: a non-string value
        # here must not discard the whole batch (previously a .replace()
        # on a list crashed into the broad except and returned []).
        for case in parsed["测试用例"]:
            if isinstance(case.get('测试步骤'), str):
                case['测试步骤'] = case['测试步骤'].replace('\n', '<br>')
        return parsed["测试用例"]

    except Exception as e:
        print(f"综合错误: {str(e)}")
        return []

# Sequential output-filename helper
def generate_sequential_filename(base_path):
    """Return the next free sequentially-numbered filename for *base_path*.

    Format: ``<base>_<NN><ext>`` where NN is one greater than the highest
    existing index in the same directory, zero-padded to two digits.
    Example: /path/output.csv -> /path/output_01.csv

    :param base_path: the desired base file path
    :return: path string with the next sequence number appended
    """
    dir_name = os.path.dirname(base_path)
    base_name, ext = os.path.splitext(os.path.basename(base_path))

    # A bare filename has an empty dirname; scan the current directory
    # in that case (os.listdir("") raises FileNotFoundError).
    scan_dir = dir_name or "."

    # Find the highest sequence number already on disk.
    max_num = 0
    pattern = re.compile(rf"^{re.escape(base_name)}_(\d+){re.escape(ext)}$")

    for filename in os.listdir(scan_dir):
        match = pattern.match(filename)
        if match:
            max_num = max(max_num, int(match.group(1)))

    # Build the next filename; join with the original (possibly empty)
    # dirname so the returned path shape matches the input.
    new_filename = f"{base_name}_{max_num + 1:02d}{ext}"
    return os.path.join(dir_name, new_filename)


# 3. Save test cases to a CSV file
def save_to_csv(test_cases, output_path):
    """Write a list of test-case dicts to a CSV file.

    Column order comes from the first case's keys. Extra keys in later
    rows are ignored and missing keys are written as empty strings, so a
    single malformed row cannot abort the whole export.

    :param test_cases: list of dicts (expected to share the same keys)
    :param output_path: destination CSV path
    :return: True on success, False on empty input or write failure
    """
    if not test_cases:
        return False

    try:
        # utf-8-sig writes a BOM so Excel auto-detects the encoding and
        # renders Chinese headers correctly.
        with open(output_path, 'w', newline='', encoding='utf-8-sig') as f:
            writer = csv.DictWriter(
                f,
                fieldnames=test_cases[0].keys(),
                restval='',
                extrasaction='ignore',
            )
            writer.writeheader()
            writer.writerows(test_cases)
        return True
    except (IOError, csv.Error) as e:
        print(f"CSV保存失败: {str(e)}")
        return False

# Entry point called by Flask
def process_test_case_generation(word_path, csv_path):
    """
    Main pipeline for test-case generation, called from Flask.

    :param word_path: path of the uploaded requirements document
    :param csv_path: path where the generated CSV should be written
    :return: tuple (success_flag, message_or_result) — on success the
             second element is the list of test-case dicts
    """
    # SECURITY: prefer the environment variable; the hard-coded key is
    # kept only as a backward-compatible fallback and should be rotated.
    DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "sk-9b654c9fae924cb8b8a0b15152d97d94")

    # 1. Validate that the uploaded file exists.
    if not os.path.exists(word_path):
        return False, f"❌ 文件不存在: {os.path.abspath(word_path)}"

    # Validate the file extension.
    if not word_path.lower().endswith(('.pdf', '.docx', '.doc', '.txt')):
        return False, "❌ 仅支持PDF、DOCX、DOC和TXT格式"

    # 2. Parse the requirements document.
    requirements = parse_rich_word(word_path)
    # parse_rich_word signals failure with a truthy {"error": ...} dict,
    # so an explicit key check is required — `if not requirements` alone
    # would let the error dict flow into the API call.
    if not requirements or (isinstance(requirements, dict) and "error" in requirements):
        return False, "需求文档解析失败"

    # 3. Generate test cases via the DeepSeek API.
    test_cases = generate_test_cases(DEEPSEEK_API_KEY, requirements)
    if not test_cases:
        return False, "测试用例生成失败"

    # 4. Validate the returned structure before writing CSV.
    if not isinstance(test_cases, list) or len(test_cases) == 0:
        return False, "生成的测试用例数据无效"

    required_fields = ['模块', '测试点', '前置条件', '测试步骤', '预期结果']
    for idx, case in enumerate(test_cases):
        if not all(field in case for field in required_fields):
            missing = [f for f in required_fields if f not in case]
            return False, f"测试用例第{idx + 1}条缺少必要字段: {', '.join(missing)}"
        # Every required field must be a non-empty string.
        for field in required_fields:
            if not isinstance(case.get(field), str) or len(case[field].strip()) == 0:
                return False, f"测试用例第{idx + 1}条字段'{field}'内容为空或类型错误"

    # 5. Persist to CSV.
    if save_to_csv(test_cases, csv_path):
        return True, test_cases
    else:
        return False, "CSV文件保存失败"
