import random
import time
from datetime import datetime, timedelta
import pandas as pd  # 导入pandas库，用于读取Excel文件和处理数据
from bs4 import BeautifulSoup
from requests import HTTPError

from x01_stock.xx_util.mysql_util import insert_db
from x01_stock.xx_util.io_util import create_directory
from DrissionPage import ChromiumPage

# 1、爬取同花顺网站当天gnzj的数据，并返回下载后的文件名列表


def fetch_ths_gnzj(start_date):
    """Download today's 10jqka concept fund-flow (gnzj) pages.

    start_date: a datetime.date used to build the output file-name prefix.
    Returns the list of HTML file paths saved by download_stock_daily.
    """
    download_dir = 'D:\\workspace\\python_all\\zz_download_file\\gnzj\\'
    create_directory(download_dir)

    # Output files are named <dir><YYYY-MM-DD>_<page>.html
    prefix = download_dir + start_date.strftime('%Y-%m-%d') + '_'
    # Pages sorted by tradezdf descending; the page number is appended by the downloader.
    base_url = 'https://data.10jqka.com.cn/funds/gnzjl/field/tradezdf/order/desc/page/'

    saved_files = []
    for page_no in range(1, 10):
        download_stock_daily(base_url, page_no, prefix, saved_files)
    return saved_files




def download_stock_daily(url_prev, index, file_name_prex, file_list):
    """Fetch one ajax page of gnzj data and save the rendered HTML to disk.

    url_prev:       URL prefix up to and including the trailing 'page/'.
    index:          1-based page number appended to the URL.
    file_name_prex: output path prefix; '<index>.html' is appended to it.
    file_list:      mutated in place - the saved path is appended on success.

    Sleeps a random 11-35 seconds after each request to avoid being blocked.
    """
    page = ChromiumPage()
    path = url_prev + str(index) + '/ajax/1/free'
    print("url地址：  " + path)
    page.get(path)
    s2 = 10 + random.randint(1, 5) * random.randint(1, 5)
    # Save the page source to a file.
    try:
        file_full_path = file_name_prex + str(index) + '.html'
        print('存储的文件路径是：file_full_path = ' + file_full_path)
        # BUG FIX: open() without an encoding uses the locale codec (e.g. cp936
        # on Chinese Windows), which can raise UnicodeEncodeError on characters
        # outside that codec. Write UTF-8 explicitly; the reader must use the
        # same encoding.
        with open(file_full_path, 'w', encoding='utf-8') as file:
            file.write(page.html)
            file_list.append(file_full_path)
    except HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
    except Exception as err:
        print(f"An error occurred: {err}")
    print('第 ' + str(index) + ' 次获取成功，--------------准备休眠----------------- ' + str(s2) + ' 秒')
    time.sleep(s2)


# 清洗出html文件中的数据
def parse_html(file_path, encode):
    """Extract the data rows from a downloaded gnzj HTML file.

    file_path: path of the HTML file to parse.
    encode:    text encoding used to read the file (e.g. 'utf-8').

    Returns a list of rows, each a list of cell-text strings, with the
    header row removed.

    BUG FIX: the ``encode`` argument was previously ignored and 'gbk' was
    hard-coded in the open() call; it is now honored. The unconditional
    pop(0) also raised IndexError when the page contained an empty table.
    """
    with open(file_path, 'r', encoding=encode) as f:
        soup = BeautifulSoup(f.read(), 'html.parser')
    table = soup.find("table")
    res_tr = []
    for tr in table.find_all('tr'):
        res_tr.append([td.get_text() for td in tr.find_all('td')])
    # Drop the header row (its <th> cells yield an empty list of <td>s).
    if res_tr:
        res_tr.pop(0)
    return res_tr


# 清洗数据并导入数据库
def handle_data(data, stat_date):
    df = pd.DataFrame(data,
                      columns=['seq', 'hangye', 'hyzs', 'hyzs_zdf', 'lrzj', 'lczj', 'zj_ze', 'gsjs', 'lzg', 'lzg_zdf', 'lzg_dqj'])
    # 遍历DataFrame中的每一列
    for col in df.columns:
        # print(f"Column Name: {col}")
        # if col != 'hangye' and col != 'lzg':
        #     df[col] = df[col].str.replace("\'", "") #df[col].replace(",", "")
        if col == 'hyzs_zdf' or col == 'lzg_zdf':
            df[col] = df[col].str.replace("%", "")
            # 打印出当前列的所有数据
    #         print('处理了：' + df[col])
    # 根据清洗后的数据插入数据库
    for row in df.itertuples(index=False):
        print(row)
        sql = f"INSERT INTO stock_gnzj (stat_date,seq,hangye,hyzs,hyzs_zdf,lrzj,lczj,zj_ze,gsjs,lzg,lzg_zdf,lzg_dqj) VALUES ('{stat_date}',%s, %s, %s, %s,%s, %s, %s, %s,%s,%s, %s)"  # SQL插入语句
        insert_db(sql, row)

if __name__ == '__main__':
    # Date the downloaded statistics belong to. Using one variable for both
    # the fetch and the stat_date column keeps the stored rows consistent
    # with the files they came from (previously the rows were stamped with
    # datetime.now().date() while the files were fetched for 2024-09-30).
    run_date = datetime(2024, 9, 30).date()

    # A1: scrape the pages and collect the saved file paths.
    # Replace cookies before running if the site starts rejecting requests.
    # BUG FIX: was fetch_stock_gnzj, which is not defined (NameError);
    # the function in this module is fetch_ths_gnzj.
    files = fetch_ths_gnzj(run_date)

    # A2: parse each downloaded file, then A3: clean and insert into the DB.
    for file in files:
        res_data = parse_html(file, 'utf-8')
        handle_data(res_data, run_date)