# import requests
# import json
#
# url = 'https://web3.isolarcloud.com.cn/#/plantList'
# r = requests.get(url)
#
# # 检查响应状态
# if r.status_code == 200:
#     # 保存HTML内容
#     with open('test.html', 'w', encoding='utf-8') as f:
#         f.write(r.text)
#
#     print("HTML content saved successfully")
#     print(f"Content type: {r.headers.get('content-type')}")
# else:
#     print(f"Request failed with status code: {r.status_code}")

# import requests
#
# cookies = {
#     'A1': 'd=AQABBIYeGGkCECIbU3aoBO12WF-7T23vj5EFEgEBAQFwGWkiaViY8HgB_eMCAA&S=AQAAAsYCDZVex_UVNKCmtv3CGaQ',
#     'A3': 'd=AQABBIYeGGkCECIbU3aoBO12WF-7T23vj5EFEgEBAQFwGWkiaViY8HgB_eMCAA&S=AQAAAsYCDZVex_UVNKCmtv3CGaQ',
#     'A1S': 'd=AQABBIYeGGkCECIbU3aoBO12WF-7T23vj5EFEgEBAQFwGWkiaViY8HgB_eMCAA&S=AQAAAsYCDZVex_UVNKCmtv3CGaQ',
#     '_ga': 'GA1.1.680741380.1763188362',
#     'cmp': 't=1763188366&j=0&u=1---',
#     'gpp': 'DBAA',
#     'gpp_sid': '-1',
#     'PRF': 'dock-collapsed%3Dtrue',
#     'fes-ds-session': 'pv%3D2',
#     '_ga_YD9K1W9DLN': 'GS2.1.s1763188361$o1$g1$t1763189005$j60$l0$h0',
# }
#
# headers = {
#     'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
#     'accept-language': 'zh-CN,zh;q=0.9',
#     'cache-control': 'no-cache',
#     'pragma': 'no-cache',
#     'priority': 'u=0, i',
#     'sec-ch-ua': '"Chromium";v="142", "Google Chrome";v="142", "Not_A Brand";v="99"',
#     'sec-ch-ua-mobile': '?0',
#     'sec-ch-ua-platform': '"Windows"',
#     'sec-fetch-dest': 'document',
#     'sec-fetch-mode': 'navigate',
#     'sec-fetch-site': 'same-origin',
#     'sec-fetch-user': '?1',
#     'upgrade-insecure-requests': '1',
#     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36',
#     # 'cookie': 'A1=d=AQABBIYeGGkCECIbU3aoBO12WF-7T23vj5EFEgEBAQFwGWkiaViY8HgB_eMCAA&S=AQAAAsYCDZVex_UVNKCmtv3CGaQ; A3=d=AQABBIYeGGkCECIbU3aoBO12WF-7T23vj5EFEgEBAQFwGWkiaViY8HgB_eMCAA&S=AQAAAsYCDZVex_UVNKCmtv3CGaQ; A1S=d=AQABBIYeGGkCECIbU3aoBO12WF-7T23vj5EFEgEBAQFwGWkiaViY8HgB_eMCAA&S=AQAAAsYCDZVex_UVNKCmtv3CGaQ; _ga=GA1.1.680741380.1763188362; cmp=t=1763188366&j=0&u=1---; gpp=DBAA; gpp_sid=-1; PRF=dock-collapsed%3Dtrue; fes-ds-session=pv%3D2; _ga_YD9K1W9DLN=GS2.1.s1763188361$o1$g1$t1763189005$j60$l0$h0',
# }
#
# params = {
#     'start': '50',
#     'count': '25',
# }
#
# response = requests.get('https://finance.yahoo.com/markets/crypto/all/', params=params, cookies=cookies,
#                         headers=headers)

# import csv
# import random
# import time
# from typing import List, Dict, Any
#
# import requests
#
# from common import SymbolContent, make_req_params_and_headers
#
# HOST = "https://query1.finance.yahoo.com"
# SYMBOL_QUERY_API_URI = "/v1/finance/screener"
# PAGE_SIZE = 100  # 可选配置（25, 50, 100）
#
#
# def parse_symbol_content(quote_item: Dict) -> SymbolContent:
#     """
#     数据提取
#     :param quote_item:
#     :return:
#     """
#     symbol_content = SymbolContent()
#     symbol_content.symbol = quote_item["symbol"]
#     symbol_content.name = quote_item["shortName"]
#     symbol_content.price = quote_item["regularMarketPrice"]["fmt"]
#     symbol_content.change_price = quote_item["regularMarketChange"]["fmt"]
#     symbol_content.change_percent = quote_item["regularMarketChangePercent"]["fmt"]
#     symbol_content.market_price = quote_item["marketCap"]["fmt"]
#     return symbol_content
#
#
# def fetch_currency_data_list(max_total_count: int) -> List[SymbolContent]:
#     """
#
#     :param max_total_count:
#     :return:
#     """
#     symbol_data_list: List[SymbolContent] = []
#     page_start = 0
#     while page_start <= max_total_count:
#         response_dict: Dict = send_request(page_start=page_start, page_size=PAGE_SIZE)
#         for quote in response_dict["finance"]["result"][0]["quotes"]:
#             parsed_content: SymbolContent = parse_symbol_content(quote)
#             print(parsed_content)
#             symbol_data_list.append(parsed_content)
#         page_start += PAGE_SIZE
#         time.sleep(random.Random().random())
#     return symbol_data_list
#
#
# def send_request(page_start: int, page_size: int) -> Dict[str, Any]:
#     """
#     公共的发送请求的函数
#     :param page_start: 分页起始位置
#     :param page_size: 每一页的长度
#     :return:
#     """
#     print(f"[send_request] page_start:{page_start}")
#     req_url = HOST + SYMBOL_QUERY_API_URI
#     common_params, headers, common_payload_data = make_req_params_and_headers()
#     # 修改分页变动参数
#     common_payload_data["offset"] = page_start
#     common_payload_data["size"] = page_size
#
#     response = requests.post(url=req_url, params=common_params, json=common_payload_data, headers=headers)
#     if response.status_code != 200:
#         raise Exception("发起请求时发生异常，请求发生错误，原因:", response.text)
#     try:
#         response_dict: Dict = response.json()
#         return response_dict
#     except Exception as e:
#         raise e
#
#
# def get_max_total_count() -> int:
#     """
#     获取所有币种总数量
#     :return:
#     """
#     print("开始获取最大的币种数量")
#     try:
#         response_dict: Dict = send_request(page_start=0, page_size=PAGE_SIZE)
#         total_num: int = response_dict["finance"]["result"][0]["total"]
#         print(f"获取到 {total_num} 种币种")
#         return total_num
#     except Exception as e:
#         print("错误信息：", e)
#         return 0
#
#
# def save_data_to_csv(save_file_name: str, currency_data_list: List[SymbolContent]) -> None:
#     """
#     将数据保存到CSV文件中
#     :param save_file_name: 保存的文件名
#     :param currency_data_list:
#     :return:
#     """
#     with open(save_file_name, mode='w', newline='', encoding='utf-8') as file:
#         writer = csv.writer(file)
#         # 写入标题行
#         writer.writerow(SymbolContent.get_fields())
#         # 遍历数据列表，并将每个币种的名称写入CSV
#         for symbol in currency_data_list:
#             writer.writerow([symbol.symbol, symbol.name, symbol.price, symbol.change_price, symbol.change_percent,
#                              symbol.market_price])
#
#
# def run_crawler(save_file_name: str) -> None:
#     """
#     爬虫主流程
#     :param save_file_name:
#     :return:
#     """
#     # step1 获取最大数据总量
#     max_total: int = get_max_total_count()
#     # step2 遍历每一页数据并解析存储到数据容器中
#     data_list: List[SymbolContent] = fetch_currency_data_list(max_total)
#     # step3 将数据容器中的数据保存csv
#     save_data_to_csv(save_file_name, data_list)
#
#
# if __name__ == '__main__':
#     timestamp = int(time.time())
#     save_csv_file_name = f"symbol_data_{timestamp}.csv"
#     run_crawler(save_csv_file_name)
