import random
import time
from datetime import datetime
import pandas as pd  # 导入pandas库，用于读取Excel文件和处理数据
from bs4 import BeautifulSoup
from requests import HTTPError

from x01_stock.xx_util.DBUtil import SqlHelper
from x01_stock.xx_util.io_util import create_directory

from DrissionPage import ChromiumPage


# 清洗出html文件中的数据
def parse_html(file_path, encoding='gbk'):
    """Extract the <tbody> rows from a saved 10jqka list page.

    :param file_path: path of a previously downloaded HTML file
    :param encoding: file encoding (pages are stored as GBK by default)
    :return: list[list[str]] — one inner list of cell texts per <tr>;
             empty list when the page has no <tbody> (e.g. an error or
             blocked response), instead of raising AttributeError
    """
    with open(file_path, 'r', encoding=encoding) as f:
        soup = BeautifulSoup(f.read(), 'html.parser')
    table = soup.find("tbody")
    if table is None:
        # Robustness: an empty/blocked response contains no data table.
        return []
    return [[td.get_text() for td in tr.find_all('td')]
            for tr in table.find_all('tr')]


# 清洗数据并导入数据库
def save_to_db(data, stat_date):
    """Clean one day's scraped rows and insert them into `ths_stock_daily`.

    :param data: rows as returned by parse_html(); each row has 15 cells —
                 a leading display index ('id') and a trailing UI column
                 ('self') that are dropped before the insert
    :param stat_date: trading-date value stored in `trading_date`
    """
    df = pd.DataFrame(data,
                      columns=['id', 'stock_code', 'stock_name', 'stock_price', 'chg',
                               'price_change', 'grow_rate', 'change_hands', 'vol_rate',
                               'pct_chg', 'amount', 'circulating_stock',
                               'circulation_market_value', 'pe_ratio', 'self'])
    # 'id' is just the row number on the page and 'self' is a checkbox
    # column — neither is persisted.
    df.drop(columns=['id', 'self'], inplace=True)
    # The statement is loop-invariant: build it once, and bind every value
    # (including the date) as a driver parameter rather than interpolating
    # it into the SQL string.
    sql = ("INSERT INTO `z_sproot_series`.`ths_stock_daily` "
           "(`trading_date`, `stock_code`, `stock_name`, `stock_price`, `chg`, "
           "`price_change`, `grow_rate`, `change_hands`, `vol_rate`, `pct_chg`, "
           "`amount`, `circulating_stock`, `circulation_market_value`, `pe_ratio`) "
           "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);")
    for row in df.itertuples(index=False):
        SqlHelper.get_instance().execute_curd(sql, (stat_date, *row))
    print('数据插入完成')


def fetch_stock_daily():
    """Download today's ranked list pages for both exchanges.

    Each page is saved as '<dir>/<YYYY-MM-DD>_<sh|sz><page>.html' by
    download_stock_daily().

    :return: list of file paths that were written successfully
    """
    file_list = []
    file_path = 'D:\\workspace\\python_all\\zz_download_file\\stock_daily\\'
    create_directory(file_path)

    file_name_prex = file_path + datetime.now().date().strftime('%Y-%m-%d') + '_'
    # List pages sorted by change percentage; the page index is appended
    # per request in download_stock_daily().
    # NOTE(review): board 'ss' is bound to the variable named `sz` and
    # 'hs' to `sh` — confirm the board codes match the intended exchanges.
    sz = 'https://q.10jqka.com.cn/index/index/board/ss/field/zdf/order/desc/page/'
    sh = 'https://q.10jqka.com.cn/index/index/board/hs/field/zdf/order/desc/page/'

    for i in range(1, 115):
        download_stock_daily(sh, i, file_name_prex + 'sh', file_list)
    print('上证获取完毕，，，开始深市')
    for i in range(1, 144):
        download_stock_daily(sz, i, file_name_prex + 'sz', file_list)

    return file_list


def download_stock_daily(url_prev, index, file_name_prex, file_list):
    """Fetch one list page with ChromiumPage and save its HTML to disk.

    :param url_prev: URL prefix up to (excluding) the page number
    :param index: 1-based page number to fetch
    :param file_name_prex: output path prefix; '<index>.html' is appended
    :param file_list: accumulator list; the path is appended only when the
                      file was written successfully
    """
    page = ChromiumPage()
    path = url_prev + str(index) + '/ajax/1/free/'
    page.get(path)
    # Random 2–15 s pause between requests to stay under rate limits.
    s2 = random.randint(1, 5) + random.randint(1, 10)
    try:
        file_full_path = file_name_prex + str(index) + '.html'
        print('存储的文件路径是：file_full_path = ' + file_full_path)
        # NOTE(review): written with the platform default encoding but
        # later read back as gbk by parse_html() — confirm they agree.
        with open(file_full_path, 'w') as file:
            file.write(page.html)
        file_list.append(file_full_path)
    except HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
    except Exception as err:
        print(f"An error occurred: {err}")
    else:
        # Only report success when the file was actually written; the
        # original printed this message even after an exception.
        print('第 ' + str(index) + ' 次获取成功，--------------准备休眠----------------- ' + str(s2) + ' 秒')
    # page.quit()
    time.sleep(s2)


if __name__ == '__main__':
    # Trading date stamped on every inserted row (manual override for now).
    stat_date = '2024-8-22' # datetime.now().date()
    file_list = fetch_stock_daily()
    print(file_list)
    # Parse each downloaded page and load its rows into the database.
    for html_file in file_list:
        rows = parse_html(html_file, 'gbk')
        save_to_db(rows, stat_date)
        print(rows)

    # Manual backfill for pages downloaded on an earlier date:
    # for i in range(1, 144):
    #     file_path = 'D:\\workspace\\python_all\\zz_download_file\\stock_daily\\2024-08-01_sz' + str(i) +'.html'
    #     print(file_path)
    #     res = parse_html(file_path, 'gbk')
    #     save_to_db(res, stat_date)
    #     print(res)