import json
import re
from pathlib import Path

import httpx
from lxml import etree
from urllib.parse import urljoin
import random
from DrissionPage import ChromiumPage


class SouGouWXSearch:
    """Scrape WeChat article search results from Sogou's weixin vertical.

    For each result card on a search page the real article URL is recovered
    from Sogou's anti-bot redirect stub, the article body is extracted, and
    one JSON object per article is appended to ``wx/<query>.jsonl``.  The raw
    article HTML is additionally saved under ``wx/html/<query>/<n>.html``.
    """

    def __init__(self):
        # follow_redirects is required: the Sogou jump URL answers with a
        # chain of 30x responses before serving the HTML redirect stub.
        self.client = httpx.Client(
            follow_redirects=True
        )
        # Browser-like headers; the Referer is rewritten per request in
        # get_wx_url so the article fetch looks like a click-through.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Referer': 'https://weixin.sogou.com/weixin?type=2&s_from=input&query=react&ie=utf8&_sug_=n&_sug_type_=',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            # 'Cookie': 'ABTEST=3|1742349864|v1; SNUID=1B192905787148DF07BEFCCF7844CA70; SUID=63615E72A052A20B0000000067DA2628; IPLOC=CN3100; SUID=63615E724752A20B0000000067DA2629; SUV=007118B3725E616367DA2629DB93D638; ariaDefaultTheme=undefined',
        }
        self.count = 0  # running index used to name saved HTML files
        self.name = ''  # current search query (used in output paths)

    def search(self, name, page=1):
        """Fetch result pages ``page``..10 for ``name`` and archive articles.

        Appends one JSON line per successfully fetched article to
        ``wx/<name>.jsonl``.  Every second page the HTTP client is recreated
        to drop the accumulated session state.

        :param name: search query string.
        :param page: first result page to fetch (1-based, capped at 10).
        """
        self.name = name
        # Make sure the output directory exists before the first jsonl write;
        # previously it was only created as a side effect of get_wx_url.
        Path('wx').mkdir(parents=True, exist_ok=True)
        while True:
            params = {
                'type': '2',
                's_from': 'input',
                'query': name,
                'ie': 'utf8',
                '_sug_': 'n',
                '_sug_type_': '',
            }
            if page > 1:
                params['page'] = page
            response = self.client.get('https://weixin.sogou.com/weixin', params=params, headers=self.headers)
            # Debug artifact: keep a copy of the last result page fetched.
            with open('test.html', 'w', encoding='utf-8') as f:
                f.write(response.text)
            tree = etree.HTML(response.text)
            if tree is not None:  # etree.HTML returns None on empty/garbage input
                for data_info in tree.xpath('//ul[@class="news-list"]//div[@class="txt-box"]'):
                    hrefs = data_info.xpath('.//a/@href')
                    if not hrefs:
                        # Malformed result card without an anchor — skip
                        # instead of crashing on hrefs[0].
                        continue
                    title = ''.join(data_info.xpath('.//a//text()'))
                    describe = ''.join(data_info.xpath('.//p//text()'))
                    new_url = urljoin(str(response.url), hrefs[0])
                    content, save_path = self.get_wx_url(new_url)
                    if content is None and save_path is None:
                        continue
                    with open(f'wx/{name}.jsonl', 'a+', encoding='utf-8') as f:
                        f.write(json.dumps({
                            'title': title,
                            'url': new_url,
                            'describe': describe,
                            'content': content,
                            'save_path': str(save_path),
                        }, ensure_ascii=False) + '\n')
            if page >= 10:  # pagination stops after result page 10
                break
            if page % 2 == 0:
                # Recreate the client every two pages to shed session state.
                self.client.close()
                self.client = httpx.Client(
                    follow_redirects=True
                )
            print(page)
            page += 1

    def get_wx_url(self, url):
        """Resolve a Sogou jump link to the real article, extract and save it.

        :param url: absolute Sogou ``/link?url=...`` jump URL.
        :return: ``(article_text, html_save_path)`` on success, or
            ``(None, None)`` when no target URL could be recovered or the
            article page has no ``#page-content`` node (e.g. a captcha page).
        """
        href = url
        b = random.randint(1, 100)
        a = href.find("url=")
        c = href.find("&k=")

        # Sogou anti-bot check: the jump URL must carry k/h parameters where
        # h is the single character found (4 + 21 + k) positions past the
        # "url=" marker — this mirrors the JavaScript on the redirect page.
        if a != -1 and c == -1:
            start_index = a + 4 + 21 + b
            a_char = href[start_index:start_index + 1]
            href += f"&k={b}&h={a_char}"

        self.headers['Referer'] = url
        # Session cookies captured from a real browser; stale values usually
        # trigger the captcha page and therefore a (None, None) result.
        cookies = {
            'ABTEST': '3|1742349864|v1',
            # Bug fix: 'SUID' appeared twice in this literal; Python keeps
            # only the last value, so the first one was never sent.
            'SUID': '63615E724752A20B0000000067DA2629',
            'IPLOC': 'CN3100',
            'SUV': '007118B3725E616367DA2629DB93D638',
            'ariaDefaultTheme': 'undefined',
            'PHPSESSID': 'n6slde4fh6m143h0ijd8kib6h7',
            'seccodeErrorCount': '1|Wed, 19 Mar 2025 03:21:24 GMT',
            'SNUID': '8A88B79BE9EFD9409BE1588DEA2C00CF',
            'seccodeRight': 'success',
            'successCount': '1|Wed, 19 Mar 2025 03:21:25 GMT',
        }
        jump_response = self.client.get(href, headers=self.headers, cookies=cookies)
        # The redirect stub assembles the target with repeated
        # "url += '...';" statements; join the pieces and strip the '@'
        # characters Sogou inserts as obfuscation.
        result = re.findall(r"url \+= '(.*?)';", jump_response.text)
        wx_url = ''.join(result).replace('@', '')
        if not wx_url:
            return None, None
        print(repr(wx_url))
        wx_response = self.client.get(wx_url, headers=self.headers, cookies=cookies)
        tree = etree.HTML(wx_response.text)
        if tree is None:  # empty/garbage response body
            print(wx_response.status_code, wx_response.url)
            return None, None
        page_nodes = tree.xpath('//*[@id="page-content"]')
        if not page_nodes:
            # No article container — most likely an anti-bot/captcha page.
            print(wx_response.status_code, wx_response.url)
            return None, None
        page_content = page_nodes[0]
        cleaning_content = [i.strip() for i in page_content.xpath('.//text()') if i.strip()]
        # Debug artifact: snapshot of the last article's cleaned text chunks.
        with open('data.json', 'w', encoding='utf-8') as f:
            json.dump(cleaning_content, f, ensure_ascii=False)
        new_content = '\n'.join(cleaning_content)
        save_path = f'wx/html/{self.name}/{self.count}.html'
        Path(save_path).parent.mkdir(exist_ok=True, parents=True)
        with open(save_path, 'w', encoding='utf-8') as f:
            f.write(etree.tostring(page_content, encoding='utf-8').decode('utf-8'))
        self.count += 1
        return new_content, save_path


if __name__ == '__main__':
    # Run a sample crawl for the query "电催化" starting at result page 1.
    searcher = SouGouWXSearch()
    searcher.search('电催化', page=1)
