import json
from urllib.parse import quote

import pypinyin
from bs4 import BeautifulSoup
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from pymilvus import DataType
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from utils.EmbeddingUtil import get_text_embedding
from utils.MySQLClient import MySQLClient

app = FastAPI()

# CORS configuration: allow every origin. NOTE(review): "*" combined with
# allow_credentials=True is permissive — tighten for production deployments.
origins = ["*"]

# Target directory for downloaded attachments (currently unused by the
# pass-through download_file helper).
download_folder = "./downloads/"

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,  # origins allowed to call this API
    allow_credentials=True,
    allow_methods=["*"],  # allowed HTTP methods (GET, POST, PUT, ...)
    allow_headers=["*"],  # allowed HTTP request headers
)


# Request model for a crawl job.
class SpiderRequest(BaseModel):
    """Parameters identifying one government-service item page to crawl."""

    # Identifier accompanying the item code (semantics defined by the caller).
    data_item_code_id: str
    # Service item code, e.g. "360400-000602070000-XK-050-01".
    data_item_code: str
    # URL of the item's information page on the portal.
    url: str


# NOTE: mid-file import; conventionally this belongs in the top-of-file import block.
from utils.MilvusClient import MilvusClient


# Helper for writing a crawled item (code, name, embedding) into Milvus.
def insert_to_milvus(item_code: str, item_name: str, vector: list):
    """Insert one record into the "government_items" Milvus collection.

    The collection is created on first use, sizing the vector field from
    the embedding length (768 as a fallback). Nothing is inserted when
    *vector* is empty.
    """
    client = MilvusClient()

    # Scalar (business) fields stored alongside the embedding.
    payload = {
        "item_code": item_code,
        "item_name": item_name,
    }

    # Reuse an existing collection; otherwise create it with the two
    # VARCHAR business fields.
    collection = client.get_collection("government_items")
    if not collection:
        collection = client.create_collection(
            collection_name="government_items",
            dim=len(vector) if vector else 768,
            business_fields=[
                {"name": "item_code", "dtype": DataType.VARCHAR, "max_length": 50},
                {"name": "item_name", "dtype": DataType.VARCHAR, "max_length": 100},
            ],
        )

    if not vector:
        return

    try:
        client.insert_data(
            collection=collection,
            vector=vector,
            business_data=payload,
        )
    except Exception as e:
        # Best-effort: log the failure instead of propagating.
        print(f"Milvus插入失败: {str(e)}")
    else:
        print(f"Milvus插入成功: {item_code}")


# Extract service-item information from the government service portal page.
def extract_information(request: SpiderRequest):
    """Crawl ``request.url`` with headless Chrome and scrape the item's
    sections into a dict keyed by Chinese section names.

    Sections whose expected ``tb2_box`` container is missing are omitted
    instead of raising. Returns an empty dict when no ``tb2_box`` divs
    are found. The browser is always closed, even on failure.
    """
    # Headless Chrome; chromedriver is resolved from PATH by Service().
    service = Service()
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')  # do not open a visible browser window
    driver = webdriver.Chrome(service=service, options=options)

    try:
        driver.get(request.url)

        # Wait until the first content container has rendered.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CLASS_NAME, 'tb2_box'))
        )

        # Trigger the page's own openWindow() to load the extra sections.
        driver.execute_script("openWindow();")

        # Wait for the dynamically injected "handling process" section.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, 'handlingProcessInfo'))
        )

        soup = BeautifulSoup(driver.page_source, 'html.parser')

        tb2_box = soup.find_all('div', class_='tb2_box')
        material_list = soup.find('div', id='materialList')
        result = {}
        if tb2_box:
            result["基本信息"] = get_basic_information(tb2_box[0])

            # Required materials; `or []` guards against a missing
            # #materialList (the helper returns None in that case, which
            # previously crashed the for-loop below).
            blcl = get_processing_materials(material_list, driver) or []
            for item in blcl:
                # Drop presentation-only fields.
                item.pop("clxs", None)
                if "无形式附件" in str(item.get("clxz")):
                    item.pop("clxz", None)
            result["办理材料"] = blcl

            # Guard the fixed indices: pages with fewer boxes previously
            # raised IndexError here.
            if len(tb2_box) > 4:
                result["办理流程"] = get_handling_process(tb2_box[4])
            result["补充说明"] = get_handling_process_more(soup)
            if len(tb2_box) > 7:
                result["常见问题"] = get_frequently_asked_questions(tb2_box[7])
        else:
            print("未找到任何 tb2_box div")

        return result

    finally:
        # Always release the browser.
        driver.quit()


# Basic-information section: flatten label/value <td> pairs.
def get_basic_information(tb2_boxes):
    """Turn consecutive <td> label/value pairs into a list of
    single-entry ``{label: value}`` dicts.

    Returns None when *tb2_boxes* is falsy; a trailing label without a
    value cell maps to an empty string.
    """
    if not tb2_boxes:
        return None

    cells = tb2_boxes.find_all('td')
    pairs = []
    # Cells alternate label, value, label, value, ...
    for idx in range(0, len(cells), 2):
        label = cells[idx].get_text(strip=True)
        value = cells[idx + 1].get_text(strip=True) if idx + 1 < len(cells) else ""
        pairs.append({label: value})
    return pairs


# Acceptance-conditions section.
def get_acceptance_conditions(tb2_boxes):
    """Return the stripped text of every <td> after the first (the first
    cell is the section header).

    Returns None (after printing a notice) when *tb2_boxes* is falsy,
    and an empty list when there are no data cells.
    """
    if tb2_boxes:
        td_tags = tb2_boxes.find_all('td')
        # Skip the header cell via slicing; the previous ``pop(0)``
        # raised IndexError when the cell list was empty.
        return [td.get_text(strip=True) for td in td_tags[1:]]
    else:
        print("未找到第二个 tb2_box div")


# Handling-process table.
def get_handling_process(tb2_box):
    """Parse the process table: the <thead> row supplies the keys
    (Chinese headers are abbreviated to pinyin initials, ASCII headers
    kept verbatim) and each <tbody> row becomes one dict."""
    if not tb2_box:
        print("未找到办理流程的 tb2_box")
        return
    thead = tb2_box.find('thead')
    tbody = tb2_box.find('tbody')
    if not (thead and tbody):
        print("未找到 thead 或 tbody")
        return

    keys = []
    for header_td in thead.find_all('td'):
        text = header_td.get_text(strip=True)
        # ASCII headers stay as-is; Chinese ones become pinyin initials.
        keys.append(text.strip() if text.isascii() else chinese_to_pinyin_initials(text))

    rows = []
    for tr in tbody.find_all('tr'):
        cells = [extract_td_content(td) for td in tr.find_all('td')]
        rows.append(dict(zip(keys, cells)))
    return rows


# Required-materials table.
def get_processing_materials(tb2_box, driver):
    """Parse the materials table into a list of per-row dicts keyed by
    the pinyin initials of the Chinese column headers.

    Cells carrying a ``queryInfo(...)`` onclick handler open a detail
    pop-up; they are deliberately skipped (would require extra Selenium
    round-trips via *driver*, which is currently unused).
    """
    if not tb2_box:
        print("未找到办理材料的 tb2_box")
        return
    thead = tb2_box.find('thead')
    tbody = tb2_box.find('tbody')
    if not (thead and tbody):
        print("未找到 thead 或 tbody")
        return

    columns = [chinese_to_pinyin_initials(th.get_text(strip=True)) for th in thead.find_all('td')]

    rows = []
    for tr in tbody.find_all('tr'):
        row = {}
        for column, td in zip(columns, tr.find_all('td')):
            if 'onclick' in td.attrs and 'queryInfo(' in td['onclick']:
                # Pop-up detail cells are skipped for now.
                print("跳过处理材料的其他信息")
            else:
                row[column] = convert_material_url(td)
        rows.append(row)
    return rows


# Extra material details scraped from the pop-up dialog.
def get_materials_other_info(driver):
    """Scrape the #materialsOtherInfo dialog into a ``{key: value}``
    dict, pairing each <th> header (abbreviated to pinyin initials) with
    the corresponding <td> value.

    Returns an empty dict when the dialog div is absent.
    """
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, 'materialsOtherInfo'))
    )
    other_info_soup = BeautifulSoup(driver.page_source, 'html.parser')
    materials_other_info = other_info_soup.find('div', id='materialsOtherInfo')
    info_dict = {}

    if materials_other_info:
        ths = materials_other_info.find_all('th')
        tds = materials_other_info.find_all('td')
        # zip() pairs headers with values and stops at the shorter list;
        # the previous parallel indexing (ths[i]) raised IndexError when
        # the table had more <td> cells than <th> headers.
        for th, td in zip(ths, tds):
            key = chinese_to_pinyin_initials(th.get_text(strip=True))
            info_dict[key] = td.get_text(strip=True)

    return info_dict


# Supplementary handling-process info (#handlingProcessInfo table).
def get_handling_process_more(soup):
    """Collect ``{label: value}`` dicts from the two-column rows of the
    #handlingProcessInfo table.

    Returns None (after printing a notice) when the section or its
    <tbody> is missing; rows without both a label and a value cell are
    skipped.
    """
    handling_process_info = soup.find('div', id='handlingProcessInfo')
    if handling_process_info:
        tbody = handling_process_info.find('tbody')

        if tbody:
            basic_info_list = []
            for tr in tbody.find_all('tr'):
                tds = tr.find_all('td')
                # Need a label AND a value cell; the previous
                # ``len(tds) >= 1`` check crashed on single-cell rows
                # when reading tds[1].
                if len(tds) >= 2:
                    chinese_field = tds[0].get_text(strip=True)
                    field_value = extract_td_content(tds[1])
                    basic_info_list.append({chinese_field: field_value})

            return basic_info_list
        else:
            print("未找到 tbody")
    else:
        print("未找到 handlingProcessInfo div")


# Generic table-section parser (setting basis / process / result sample).
def get_table_content(tb2_boxe):
    """Parse a table whose first <tr> is the header row.

    Header cells become dict keys (ASCII headers kept verbatim, Chinese
    headers abbreviated to pinyin initials); each remaining row becomes
    one dict. Returns None (after printing a notice) when the container,
    its <tbody>, or any <tr> rows are missing.
    """
    if tb2_boxe:
        tbody = tb2_boxe.find('tbody')
        if tbody:
            tr_tags = tbody.find_all('tr')

            if len(tr_tags) > 0:
                # Header row -> keys.
                header_tds = tr_tags[0].find_all('td')
                keys = [td.get_text(strip=True).strip() if td.get_text(strip=True).isascii()
                        else chinese_to_pinyin_initials(td.get_text(strip=True))
                        for td in header_tds]

                # Remaining rows -> data dicts.
                data_list = []
                for tr in tr_tags[1:]:
                    values = [extract_td_content(td) for td in tr.find_all('td')]
                    data_list.append(dict(zip(keys, values)))

                return data_list

            else:
                print("第三个 tb2_box div 的 tbody 中未找到任何 tr 标签")
        else:
            print("第三个 tb2_box div 中未找到 tbody")
    else:
        print("未找到第三个 tb2_box div")


# FAQ section: rows come in question/answer pairs.
def get_frequently_asked_questions(tb2_boxe):
    """Parse the FAQ table, where each question row is immediately
    followed by its answer row. Both rows are (label, content) cell
    pairs; labels are kept verbatim when ASCII, otherwise abbreviated
    to pinyin initials."""
    if not tb2_boxe:
        print("未找到第七个 tb2_box div")
        return
    tbody = tb2_boxe.find('tbody')
    if not tbody:
        print("第七个 tb2_box div 中未找到 tbody")
        return
    tr_tags = tbody.find_all('tr')
    if not tr_tags:
        print("第七个 tb2_box div 的 tbody 中未找到任何 tr 标签")
        return

    def normalized_key(label_td):
        # ASCII labels stay as-is; Chinese labels become pinyin initials.
        text = label_td.get_text(strip=True)
        return text.strip() if text.isascii() else chinese_to_pinyin_initials(text)

    qa_list = []
    # Walk the rows two at a time: question row, then answer row.
    for i in range(0, len(tr_tags), 2):
        if i + 1 >= len(tr_tags):
            continue
        question_tds = tr_tags[i].find_all('td')
        answer_tds = tr_tags[i + 1].find_all('td')
        if len(question_tds) > 1 and len(answer_tds) > 1:
            qa_list.append({
                normalized_key(question_tds[0]): extract_td_content(question_tds[1]),
                normalized_key(answer_tds[0]): extract_td_content(answer_tds[1]),
            })
    return qa_list


# Flow-chart images.
def get_flow_chart(tb2_box):
    """Collect the src URL of every <img> inside the box, each passed
    through download_file. Returns None (after printing a notice) when
    the box is missing."""
    if not tb2_box:
        print("未找到流程图")
        return

    img_src_list = []
    for img in tb2_box.find_all('img'):
        src = img.get('src', '').strip()
        # Ignore images without a usable src.
        if src:
            img_src_list.append(download_file(src))
    return img_src_list


def chinese_to_pinyin_initials(chinese_str):
    """Abbreviate a Chinese string to the first pinyin letter of each
    character (used to turn Chinese table headers into ASCII dict keys)."""
    syllables = pypinyin.pinyin(chinese_str, style=pypinyin.Style.FIRST_LETTER)
    return ''.join(syllable[0][0] for syllable in syllables)


# Extract a cell's content: prefer a link target, else the plain text.
def extract_td_content(td):
    """Return the cell's link href (passed through download_file) when
    it contains an ``<a href=...>``; otherwise its stripped text. A
    'javascript:void(0);' href is treated as empty."""
    a_tag = td.find('a')
    if not (a_tag and 'href' in a_tag.attrs):
        return td.get_text(strip=True)
    href = download_file(a_tag['href'])
    return '' if href == 'javascript:void(0);' else href


def download_file(url):
    """Pass-through placeholder: returns *url* unchanged.

    No file is actually downloaded yet (``download_folder`` is unused).
    NOTE(review): replace with a real download if local copies are needed.
    """
    return url


# Convert a materials-table cell into its download link(s).
def convert_material_url(td):
    """Resolve a materials-table cell to its download target(s).

    - Multiple <a> tags with ``fileDownload(docId, fileName)`` onclick
      handlers become a list of ``{"annex": text, "annexUrl": url}`` dicts.
    - A single ``<a href=...>`` returns its href ('' for javascript:void(0);).
    - Otherwise the cell's stripped plain text is returned.
    """
    a_tags = td.find_all('a')
    links = []

    if len(a_tags) > 1:
        for a_tag in a_tags:
            if 'onclick' in a_tag.attrs and "fileDownload" in a_tag['onclick']:
                onclick_content = a_tag['onclick']
                # "fileDownload('docId','fileName')" -> "docId,fileName".
                # maxsplit=1 keeps file names containing commas intact;
                # the old unbounded split raised ValueError on them.
                args = onclick_content.replace("fileDownload(", "").replace(")", "").replace("'", "")
                doc_id, filename = args.split(',', 1)
                filename_encoded = quote(filename)
                annex_url = f"https://www.jxzwfww.gov.cn/jxzw/qstb/download.do?docId={doc_id}&fileName={filename_encoded}"
                links.append({
                    "annex": a_tag.text,
                    "annexUrl": annex_url
                })

    elif len(a_tags) == 1 and 'href' in a_tags[0].attrs:
        text = a_tags[0]['href']
        return '' if text == 'javascript:void(0);' else text

    # No usable links found: fall back to the cell's text.
    if not links:
        return td.get_text(strip=True)

    return links


if __name__ == '__main__':
    # Manual smoke test: embed a single item name and insert it into Milvus.
    item_code = "360400-000602070000-XK-050-01"
    item_name = "《食品生产许可证》新发"
    # Full crawl path (disabled): scrape the item page first.
    # result = extract_information(SpiderRequest(data_item_code_id="2", data_item_code="360400-000602070000-XK-050-01",
    #                                            url="https://jj.jxzwfww.gov.cn/jxzw/bszn/index.do?itemCode=360400-000602070000-XK-050-01&webId=5&flag=gj"))

    # item_content = json.dumps(result, ensure_ascii=False)
    # print(item_content)
    vector = get_text_embedding(item_name)  # item name -> embedding vector
    insert_to_milvus(item_code, item_name, vector)

    # MySQL persistence of the crawled JSON (disabled):
    # item_data = {
    #     'item_code': item_code,
    #     'item_name': item_name,
    #     'content': item_content
    # }
    # mysql_client = MySQLClient()
    # mysql_client.insert_crawled_item(item_data)