import random
import time
from datetime import datetime
import pandas as pd  # pandas: build DataFrames from the parsed table rows
import requests
from bs4 import BeautifulSoup
from requests import HTTPError

from x01_stock.xx_util.MyDrissionPage import get_headers_rzrq
from x01_stock.xx_util.mysql_util import insert_db
from x01_stock.xx_util.io_util import create_directory


# Adapted from D:\workspace\python_all\a01_stock\a03_FetchHeader.py
# 2024-07-20 tested end to end, fully usable.
# 2024-07-21 added randomized delays; three cookie sets configured at once,
#            which is generally enough to complete the crawl.
# 2024-07-22 DrissionPage now fetches cookies automatically, no manual step.


# 1. Fetch the 10jqka margin-trading (rzrq) pages for one stock code and
#    return the list of HTML files written to disk.
def fetch_rqrq_by_stock_code(stock_code):
    """Download up to 3 pages of margin-trading (rzrq) data for *stock_code*.

    Each AJAX page from data.10jqka.com.cn is wrapped in a minimal HTML
    skeleton and saved to the download directory.  On repeated failures the
    request headers are rotated, and after ``max_attempts`` failures the
    cookies are re-fetched via DrissionPage.

    :param stock_code: six-digit stock code, e.g. ``'300251'``
    :return: list of paths of the files written successfully
    """
    files_list = []
    # Reference page: https://data.10jqka.com.cn/market/rzrqgg/code/000001/
    # The hard-coded Cookie below is only a seed; get_headers_rzrq refreshes
    # it automatically (DrissionPage), so no manual copy-paste is needed.
    base_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Host': 'data.10jqka.com.cn',
        'Referer': 'https://data.10jqka.com.cn/market/rzrqgg/code/000001/',
        'Connection': 'keep-alive',
        'Cookie': 'v=A8LP5Xc_EAB19wxc3-OAttmFE8Mhk8ateJe60Qzb7jXgX2x99CMWvUgnCuLf;Hm_lpvt_78c58f01938e4d85eaf619eae71b4ed1=1721623464;Hm_lpvt_60bad21af9c824a4a0530d5dbf4357ca=1721623464;Hm_lpvt_f79b64788a4e377c608617fba4c736e2=1721623464;HMACCOUNT=7738D18D94DD39A0;Hm_lvt_f79b64788a4e377c608617fba4c736e2=1721364736,1721364802,1721364890,1721623242;Hm_lvt_60bad21af9c824a4a0530d5dbf4357ca=1721364736,1721364802,1721364890,1721623242;Hm_lvt_78c58f01938e4d85eaf619eae71b4ed1=1721364736,1721364802,1721364890,1721623242;'
    }
    # Three independent copies (the original pasted the same dict three
    # times) so get_headers_rzrq can install a different cookie in each and
    # the retry loop can rotate between them.
    headers_update = [dict(base_header) for _ in range(3)]
    headers = get_headers_rzrq(headers_update)

    # Target directory for the downloaded pages.
    file_path = 'D:\\workspace\\python_all\\zz_download_file\\ths_rzrq\\'
    create_directory(file_path)

    file_name_prex = file_path + datetime.now().date().strftime('%Y-%m-%d') + '_'
    # The AJAX endpoint returns a bare table fragment; wrap it so the saved
    # file is a standalone HTML document.
    html_prex = '''
    <!DOCTYPE html>
    <html>
    <head lang="en">
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        <!–[if lte IE 8]>
        <meta http-equiv="X-UA-Compatible" content="IE=EmulateIE7" />
        <![endif]–>
        <!–[if IE 9]>
        <meta http-equiv="X-UA-Compatible" content="IE=EmulateIE7;IE=9"/>
        <![endif]–>
        <title>概念资金流向排行-数据中心-同花顺财经</title>
    </head>
    <body>
    '''
    html_tail = '</body></html>'

    total_pages = 3
    for i in range(1, total_pages + 1):
        path = 'https://data.10jqka.com.cn/ajax/rzrqgg/code/' + stock_code + '/order/desc/page/' + str(i) + '/'
        print('准备获取第' + str(i) + '页,路径是：' + path)
        # Start each page with the first header set; rotate on failure.
        header_idx = 0
        header = headers[header_idx]
        # Doubly-randomized delay so the server does not observe a regular
        # request pattern.
        sleep = i * random.randint(1, 9) * random.randint(1, 5)
        max_attempts = 5  # maximum retries per page
        attempts = 0

        while attempts < max_attempts:
            try:
                file_full_path = file_name_prex + str(i) + '.html'
                print('file_full_path = ' + file_full_path)
                response = requests.get(path, headers=header)
                response.raise_for_status()
                # Persist the fetched fragment as a complete HTML file.
                with open(file_full_path, 'w', encoding='utf-8') as file:
                    file.write(html_prex + response.text + html_tail)
                files_list.append(file_full_path)
                break  # page fetched successfully
            except HTTPError as http_err:
                print(f"HTTP error occurred: {http_err}")
            except Exception as err:
                print(f"An error occurred: {err}")
            attempts += 1

            # Rotate to the next header set.  Fix: the original compared
            # object identity with id(), which silently breaks if equal
            # dicts are re-created; a plain index cycle is robust.
            header_idx = (header_idx + 1) % len(headers)
            header = headers[header_idx]
            print('切换到headers' + str(header_idx + 1))

            # Back off before retrying; wait grows with the attempt count.
            if attempts < max_attempts:
                attempt_sleep = attempts * random.randint(1, 9)
                print(f"Retrying in {attempt_sleep} seconds...")
                time.sleep(attempt_sleep)

        if attempts == max_attempts:
            print("Failed to retrieve data after multiple attempts.")
            print('重新获取cookies')
            # Re-fetch cookies before trying the next page.
            headers = get_headers_rzrq(headers_update)
        print('第' + str(i) + '次获取完成，准备休眠时间：' + str(sleep))
        if i < total_pages:
            # Fix: the original tested `i < 9`, which is always true for a
            # 3-page loop and forced a pointless sleep after the last page.
            time.sleep(sleep)

    return files_list


# Extract the table rows from a downloaded HTML file.
def parse_html(file_path, encode='gbk'):
    """Read *file_path* with the given encoding and return the rows of its
    ``<tbody>`` as a list of lists of cell strings.

    The ``'\\n\\t\\t\\t'`` runs that 10jqka embeds in each cell are stripped.
    """
    with open(file_path, 'r', encoding=encode) as handle:
        soup = BeautifulSoup(handle.read(), 'html.parser')
    body = soup.find("tbody")
    return [
        [cell.get_text().replace('\n\t\t\t', '') for cell in row.find_all('td')]
        for row in body.find_all('tr')
    ]


# Clean the parsed rows and insert them into the database.
def handle_data(data, stock_code, stock_name):
    """Insert parsed rzrq rows for one stock into ``stock_rzrq_ths``.

    :param data: list of 11-element rows as produced by ``parse_html``
    :param stock_code: stock code stored alongside every row
    :param stock_name: stock name stored alongside every row
    """
    df = pd.DataFrame(data,
                      columns=['seq', 'stat_date', 'rzye', 'mre', 'che', 'rzjmr', 'rqyl', 'rqmcl', 'rqchl', 'rqjmc',
                               'rzrqye'])
    # Security fix: the original interpolated stock_code/stock_name into the
    # SQL string with an f-string (injection risk).  All 13 values are now
    # bound as %s parameters, which insert_db already supports for the
    # other 11 columns.
    sql = ("INSERT INTO stock_rzrq_ths (stock_code, stock_name, seq, stat_date, rzye, mre, che, "
           "rzjmr, rqyl, rqmcl, rqchl, rqjmc, rzrqye) "
           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    for row in df.itertuples(index=False):
        print(row)
        insert_db(sql, (stock_code, stock_name) + tuple(row))


# --- script entry: parse previously downloaded pages and load them into the DB ---
stock_code = '300251'
stock_name = '光线传媒'
# A1: file list produced by the crawler.  Re-enable the fetch call when a
# fresh download is wanted (cookies are obtained automatically).
# files = fetch_rqrq_by_stock_code('300251')
files = [r'D:\workspace\python_all\zz_download_file\ths_rzrq\2024-07-27_1.html',
         r'D:\workspace\python_all\zz_download_file\ths_rzrq\2024-07-27_2.html',
         r'D:\workspace\python_all\zz_download_file\ths_rzrq\2024-07-27_3.html']
for html_file in files:
    print(html_file)
    # A2: extract the table rows from the file.
    res_data = parse_html(html_file, 'gbk')
    # A3: clean the rows and insert them into the database.
    # (Fix: dropped the `current_date` assignment the original made here —
    # it was never used.)
    handle_data(res_data, stock_code, stock_name)
