# https://insurance.hexun.com/bxhyzx/index-657.html
import requests
from pyquery import PyQuery as pq
import time
import pandas as pd
import os

# Session cookies captured from a browser visit to insurance.hexun.com.
# NOTE(review): these include tracking/session tokens tied to one capture date
# (2024-05-27) and will likely expire — refresh from the browser if requests
# start failing or returning different content.
cookies = {
    "ASL": "19870,0000q,67241811",
    "ADVC": "3cfe2ec6645920",
    "ADVS": "3cfe2ec6645920",
    "hxck_cd_sourceteacher": "sR%2FuPcnSSZVIdShwHag3RAnrY9aauRbMjEnRBtq%2FNF1ooDP7obDVPgaQGWxsj76JBYWpWct2QaLhLGlGGFMNCUkUFo7IumEiioDvtuc9HEKV5pgWGkfxDS1ag5k8LMT6nqD441doVVjhZ4h3xPXMVQEkK2qbvkbyf0Z9Rw1RLmQFX45BnjCJcQ%3D%3D",
    "hxck_cd_channel_order_mark1": "4001000001_auto",
    "hxck_cd_channel": "tKK6EMkJ7JK75WOJ%2FqluxbbMrhZQZtn9if6%2FTggkwv3nf3P%2FebDCkk56hGWRbNmSqaeKrwCfBPEeWd5BYbz4cNPj5uRvcet2mBcvjKkNS0YkIIkUNNldxyCbuc0tvuf58XDZnfn7rP4%3D",
    "hxwapcookieid": "98a80xen0y5f4a8hi",
    "HexunTrack": "SID=98a80xen0y5f4a8hi&CITY=0&TOWN=0",
    "Hm_lvt_81ff19c9eb1c05cdfeacb05d2036f066": "1716814800",
    "Hm_lpvt_81ff19c9eb1c05cdfeacb05d2036f066": "1716814800",
    "appToken": "pc%2Cother%2Cchrome%2ChxAppSignId59758575156678731716814800319%2CPCDUAN",
    "hexun_popuped": "2024-05-27",
    "Hm_lvt_acd633708f7e9eb27c1e42b150fa892c": "1716814851",
    "Hm_lpvt_acd633708f7e9eb27c1e42b150fa892c": "1716814859",
}

# Browser-like request headers (copied from Chrome dev tools) so the request
# looks like a regular page visit. Cookies are sent separately via the
# `cookies` kwarg; the raw Cookie header below is kept commented out for
# reference only.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    # 'Cookie': 'ASL=19870,0000q,67241811; ADVC=3cfe2ec6645920; ADVS=3cfe2ec6645920; hxck_cd_sourceteacher=sR%2FuPcnSSZVIdShwHag3RAnrY9aauRbMjEnRBtq%2FNF1ooDP7obDVPgaQGWxsj76JBYWpWct2QaLhLGlGGFMNCUkUFo7IumEiioDvtuc9HEKV5pgWGkfxDS1ag5k8LMT6nqD441doVVjhZ4h3xPXMVQEkK2qbvkbyf0Z9Rw1RLmQFX45BnjCJcQ%3D%3D; hxck_cd_channel_order_mark1=4001000001_auto; hxck_cd_channel=tKK6EMkJ7JK75WOJ%2FqluxbbMrhZQZtn9if6%2FTggkwv3nf3P%2FebDCkk56hGWRbNmSqaeKrwCfBPEeWd5BYbz4cNPj5uRvcet2mBcvjKkNS0YkIIkUNNldxyCbuc0tvuf58XDZnfn7rP4%3D; hxwapcookieid=98a80xen0y5f4a8hi; HexunTrack=SID=98a80xen0y5f4a8hi&CITY=0&TOWN=0; Hm_lvt_81ff19c9eb1c05cdfeacb05d2036f066=1716814800; Hm_lpvt_81ff19c9eb1c05cdfeacb05d2036f066=1716814800; appToken=pc%2Cother%2Cchrome%2ChxAppSignId59758575156678731716814800319%2CPCDUAN; hexun_popuped=2024-05-27; Hm_lvt_acd633708f7e9eb27c1e42b150fa892c=1716814851; Hm_lpvt_acd633708f7e9eb27c1e42b150fa892c=1716814859',
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "cross-site",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36",
    "sec-ch-ua": '"Google Chrome";v="125", "Chromium";v="125", "Not.A/Brand";v="24"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
}


# Accumulator for all scraped rows (date / href / content); appended to by
# fetch_data() and written out by save().
result_df = pd.DataFrame()
# Base name (without extension) of the output Excel file. "和讯" = "Hexun".
fileName = "和讯"


def fetch_data():
    """Scrape Hexun insurance-news index pages 657 down to 601.

    For each page, extract every ``<ul><li>`` entry's date, link and title,
    append the rows to the module-level ``result_df``, then persist the
    result via ``save()``.
    """
    global result_df
    urls = [
        f"https://insurance.hexun.com/bxhyzx/index-{x}.html"
        for x in range(657, 600, -1)
    ]
    # Collect plain dicts and concat once at the end: pd.concat per row
    # inside the loop is quadratic in the number of rows.
    rows = []
    for url in urls:
        print(url)
        try:
            # timeout added so a stalled connection cannot hang the scraper
            response = requests.get(url, cookies=cookies, headers=headers, timeout=10)
            response.raise_for_status()
            # Pages are GBK-encoded (check document.charset in the browser
            # console); errors="replace" keeps a stray bad byte from aborting
            # the whole page.
            html_str = response.content.decode("gbk", errors="replace")
            doc = pq(html_str)
            for item in doc("ul>li"):
                li = pq(item)
                rows.append(
                    {
                        "date": li("span").text()[1:-1],  # strip surrounding brackets
                        "href": li("a").attr("href"),
                        "content": li("a").text(),
                    }
                )
        except Exception as exc:  # noqa: BLE001 — best-effort per page, but log it
            # The original swallowed errors silently; keep the best-effort
            # behavior but surface what went wrong.
            print(f"failed to fetch {url}: {exc}")
        # Throttle between pages even when a request fails.
        time.sleep(1)

    if rows:
        result_df = pd.concat([result_df, pd.DataFrame(rows)], ignore_index=True)

    save()


def save():
    """Write the accumulated ``result_df`` to ``output/<fileName>.xlsx``
    next to this script, with Chinese column headers."""
    out_dir = os.path.join(os.path.dirname(__file__), "output")
    # Bug fix: to_excel raises FileNotFoundError if the directory is missing.
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, f"{fileName}.xlsx")
    # Columns are date / href / content; header renames them for the reader.
    result_df.to_excel(
        path, index=False, header=["日期", "新闻链接", "内容"]
    )
    print("下载完成")

# Script entry point: run the scrape (and implicit save) when executed directly.
if __name__ == "__main__":
    fetch_data()