import hashlib
import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional
from lxml import etree
import requests
# from openai import OpenAI
from deal_result import load_json_result, write_result_excel_v3
import sseclient
import session
import time
import checkurl


class DeepSeekCodeAnalyzer:
    """Streams articles to a Tencent Cloud LKE chat bot over SSE and caches
    each JSON analysis result on disk (one file per article URL, MD5-keyed).

    NOTE(review): despite the class name, the endpoint is Tencent Cloud LKE
    (lke.cloud.tencent.com), not the DeepSeek API.
    """

    def __init__(self):
        # Bot app key. Overridable via the environment so the secret does not
        # have to live in source control.
        # SECURITY NOTE(review): rotate and remove the hard-coded fallback
        # once all callers set LKE_BOT_APP_KEY.
        self.api_key = os.environ.get(
            'LKE_BOT_APP_KEY',
            'GKFleIcPwelfdAOedftFQfbNswbBfgNnivbJrlbQUkicXNIJeHMNCDoqnCTTwHacqqkxDkeUFGKOxFXkFTGiUNoCBIrKYNnLoklakAskjkZLajXCKyNGgcPdMuARODOT',
        )
        self.api_url = "https://wss.lke.cloud.tencent.com/v1/qbot/chat/sse"
        self.message = ''
        # Visitor ID (supplied by the external system; different visitors
        # must use different IDs).
        self.visitor_biz_id = "202403130001"
        self.streaming_throttle = 1  # SSE streaming throttle control
        self.cache_dir = Path('.code_cache')
        self.cache_dir.mkdir(exist_ok=True)
        # sort.txt records processing order so the per-article JSON result
        # files can later be read back in sequence.
        self.sort_txt_path = './.code_cache/sort.txt'
        self.session_id = ''
        # Start each run with a fresh sort.txt.
        try:
            os.remove(self.sort_txt_path)
        except FileNotFoundError:
            pass

    def analyze_html_file(self, html_file: str, article_url: str, product: str,
                          name: str, fail_urls) -> Dict:
        """Analyze one article file and persist the result.

        Strips markdown image lines, sends the remaining text to the bot,
        attaches the product/name/link-check metadata, and caches the result
        as <md5(article_url)>.json.
        """
        cache_key = self._generate_cache_key(article_url)
        # Append to sort.txt so results can be read back in processing order.
        with open(self.sort_txt_path, 'a', encoding='utf-8') as f:
            f.write(name + "---" + cache_key + '.json\n')

        # Drop markdown image lines (![alt](url)): images are irrelevant to
        # the text analysis and only bloat the prompt.
        image_pattern = re.compile(r'!\[.*?\]\(.*?\)')
        with open(html_file, 'r', encoding='utf-8') as f:
            html = ''.join(line for line in f if not image_pattern.search(line))

        result = self.analyzer_tencent_cloud(html, article_url)
        result['类型'] = product
        result['文章名称'] = name
        result['链接问题'] = fail_urls
        self.save_result_to_json(cache_key, result)
        return result

    def analyze_md_content(self, md_content: str, article_url: str,
                           product: str, name: str) -> Dict:
        """Analyze raw Markdown content and persist the result.

        NOTE(review): unlike analyze_html_file, this writes only the cache
        file name (no "name---" prefix) to sort.txt — confirm the downstream
        reader expects both formats.
        """
        cache_key = self._generate_cache_key(article_url)
        # Append to sort.txt so results can be read back in processing order.
        with open(self.sort_txt_path, 'a', encoding='utf-8') as f:
            f.write(cache_key + '.json\n')
        result = self.analyzer_tencent_cloud(md_content, article_url)
        result['类型'] = product
        result['文章名称'] = name
        self.save_result_to_json(cache_key, result)

        return result

    def _generate_cache_key(self, url: str) -> str:
        """Return the cache file stem for *url* (hex MD5 of the URL)."""
        return hashlib.md5(url.encode()).hexdigest()

    def analyzer_tencent_cloud(self, html: str, article_url: str) -> Dict:
        """Send the article to the LKE bot over SSE and parse its JSON answer.

        Returns the cached result when one exists for *article_url*. On any
        failure returns {"error": ..., "data": <last SSE payload>} instead of
        raising, so a batch run can continue past individual failures.
        """
        cache_key = self._generate_cache_key(article_url)
        cached_result = self._load_from_cache(cache_key)
        if cached_result:
            print(cached_result)
            return cached_result

        # The instruction prompt lives in the bot's server-side configuration;
        # only the article body is sent here. (A longer inline instruction
        # string was previously dead code — it was immediately overwritten.)
        prompt = f"""待分析文章如下：\n{html}"""
        self.message = prompt
        self.session_id = session.get_session()
        req_data = {
            "content": prompt,
            "bot_app_key": self.api_key,
            "visitor_biz_id": self.visitor_biz_id,
            "session_id": self.session_id,
            "streaming_throttle": self.streaming_throttle,
            'search_network': 'enable',
        }
        data = {}
        try:
            resp = requests.post(url=self.api_url, data=json.dumps(req_data), stream=True,
                                 headers={"Accept": "text/event-stream"})
            client = sseclient.SSEClient(resp)

            # Each SSE event carries the reply-so-far; only the last event's
            # payload is used after the stream ends.
            for ev in client.events():
                data = json.loads(ev.data)
                print(data)

            # Extract the outermost {...} span from the final reply — the
            # model wraps its JSON in markdown fences / surrounding prose.
            # (A fixed-offset debug print of content[8:-4] was removed here:
            # it could raise before extraction and turn a good response into
            # an error result.)
            text = data['payload']['content']
            start = text.find('{')
            end = text.rfind('}')
            content = text[start:end + 1]
            # The model sometimes emits HTML-attribute quoting that breaks
            # json.loads; strip it before parsing.
            content = content.replace('="', '=')
            content = content.replace('">', '>')
            result = json.loads(content)
            return result
        except Exception as e:
            # Best-effort: surface the failure inside the result dict so the
            # caller's batch loop keeps going.
            return {
                "error": f"API调用失败: {str(e)}",
                "data": data
            }

    def save_result_to_json(self, key: str, result: Dict) -> None:
        """Write *result* to <cache_dir>/<key>.json (UTF-8, pretty-printed)."""
        with open(self.cache_dir / f'{key}.json', 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2)

    def _load_from_cache(self, key: str) -> Optional[Dict]:
        """Return the cached result for *key*, or None when absent."""
        cache_file = self.cache_dir / f'{key}.json'
        if cache_file.exists():
            with open(cache_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        return None


def main():
    """Run the analyzer over a single sample article and export the results.

    Reads a local HTML fixture, link-checks it via checkurl, analyzes the
    sample Markdown article with those link failures attached, then
    aggregates every cached JSON result into an Excel report.
    """
    ds = DeepSeekCodeAnalyzer()

    with open('../test/html/1.html', 'r', encoding='utf-8') as f:
        html_content = f.read()
    # Link-check the fixture; the failures are attached to the analysis
    # result as the '链接问题' field.
    fail_urls = checkurl.main(html_content, '')

    # (f-prefixes removed from placeholder-free literals; values unchanged.)
    ds.analyze_html_file('test/样例.md',
                         'https://developer.huawei.com/consumer/cn/doc/harmonyos-guides/serializable-overviewtestmd',
                         '指南',
                         'test.md', fail_urls)

    analyzer_results = load_json_result('.code_cache')
    write_result_excel_v3(analyzer_results)


if __name__ == '__main__':
    # time.perf_counter() is the recommended monotonic clock for measuring
    # elapsed time; time.time() can jump with system clock adjustments.
    start = time.perf_counter()
    main()
    elapsed = time.perf_counter() - start
    print(elapsed)
