# 从连接获取到汇率，存入数据库
import json
import random
import re
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from utils import WebDriverInstance, MysqlConnectInstance


# 2. Grab exchange rates from a WeChat public-account article using Selenium.
def get_exchange_rate_from_mp_with_selenium(mp_url):
    """Open *mp_url* in the shared WebDriver, wait for the article to render,
    and parse the exchange-rate table out of the page source.

    Returns a list of {"key": ..., "value": ...} dicts; empty list on failure.
    """
    web_driver = WebDriverInstance().get_driver()
    result_list = []
    try:
        web_driver.get(mp_url)
        # Wait up to 20s for the article's bottom area — its presence signals
        # that the page body (including the rate table) has finished loading.
        wait = WebDriverWait(web_driver, 20)
        print('wait finish...')
        wait.until(EC.element_to_be_clickable((By.ID, 'content_bottom_area')))
        html_content = web_driver.page_source
        result_list = get_exchange_rate_from_html_content(html_content)
    except WebDriverException as e:
        # Include the exception itself so failures are diagnosable, not just
        # the URL that was being loaded.
        print(f'发生异常，最后一次地址是：{mp_url}，异常：{e}')
        # Quit the broken driver; presumably WebDriverInstance creates a
        # fresh one on the next get_driver() call — TODO confirm in utils.
        web_driver.quit()
    return result_list


# Fetch exchange-rate details from a public-account article over plain HTTP.
def get_exchange_rate_from_mp(mp_url):
    """Fetch *mp_url* (after a random anti-throttling delay) and parse the
    exchange-rate table from the response body.

    Returns the list produced by get_exchange_rate_from_html_content.
    """
    # Random delay to avoid hammering the server / tripping rate limits.
    time.sleep(random.uniform(5, 10))
    # verify=False keeps the original TLS behavior; the timeout prevents a
    # dead server from hanging the whole crawl forever.
    response = requests.get(mp_url, verify=False, timeout=30)
    html_content = response.text
    return get_exchange_rate_from_html_content(html_content)


def get_exchange_rate_from_html_content(html_content):
    """Parse article HTML and extract exchange-rate entries from its table.

    Finds the first <table>; every cell whose text contains "=" is treated
    as a rate line (e.g. "1USD=1234.5N").  Returns a list of
    {"key": ..., "value": ...} dicts where "key" is the rate line with all
    digits and dots removed (e.g. "USD=N"), normalised to "NGN" for the
    Naira pairs.  Returns an empty list when no table is present.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find('table')
    object_list = []

    if table is None:
        # No table usually means the page did not render properly; back off
        # briefly before the caller moves on to the next URL.
        time.sleep(random.uniform(5, 10))
        return object_list

    for row in table.find_all('tr'):
        for cell in row.find_all(['td', 'th']):
            # get_text() already strips tags; collapse ALL whitespace so
            # "1 USD = 1234" becomes "1USD=1234".  (The original computed a
            # tag-stripped intermediate and then discarded it — fixed here.)
            clean_text = re.sub(r'\s+', '', cell.get_text())
            if "=" in clean_text:
                # Key = the rate line with digits/dots removed,
                # e.g. "1USD=1234.5N" -> "USD=N".
                key = re.sub(r"[0-9.]+", "", clean_text)
                object_list.append({
                    "key": key,
                    "value": clean_text
                })

    # The source abbreviates Nigerian Naira as a bare "N"; normalise the
    # USD and CNY pairs to the standard "NGN" code.
    for obj in object_list:
        if obj.get("key") == "USD=N" or obj.get("key") == "CNY=N":
            obj["key"] = obj.get('key').replace("=N", "=NGN")
            obj["value"] = obj.get('value').replace("=N", '=NGN')

    return object_list


# Load data from a JSON file.
def read_json_file(path):
    """Load and return the JSON document stored at *path*.

    Raises FileNotFoundError / json.JSONDecodeError on bad input.
    """
    # Explicit encoding avoids platform-dependent defaults; the dead `pass`
    # after the return has been removed.
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)


# Write the day's data into MySQL.
def write_data_to_mysql(title, has_grab, object_list):
    """Persist one day's exchange rates into exchange_data_log.

    Inserts new rows, or — when the URL was already grabbed once
    (has_grab == 1) — updates the existing rows instead.  *title* doubles
    as both rate_date_str and rate_date.  Returns the number of affected
    rows (0 when the connection is down or object_list is empty).
    """
    connection = MysqlConnectInstance().get_connection()
    # Tuple order (rate_date_str, value_str, rate_date, key_str) matches the
    # INSERT placeholders AND the UPDATE SET/WHERE placeholders below.
    data_to_insert = [(title, item['value'], title, item['key']) for item in object_list]
    rowcount = 0
    if connection.is_connected():
        print("成功连接到数据库")

        cursor = connection.cursor()
        try:
            sql_str = """
                 INSERT INTO exchange_data_log (`rate_date_str`,`value_str`, `rate_date`, `key_str`)
                 VALUES (%s, %s,%s, %s)
                 """

            if has_grab == 1:
                # Already grabbed once: refresh the stored values instead of
                # inserting duplicate rows.
                sql_str = "UPDATE exchange_data_log set rate_date_str = %s , value_str = %s where rate_date=%s and key_str = %s"

            # Batch insert/update all rows in one round trip.
            cursor.executemany(sql_str, data_to_insert)
            connection.commit()
            rowcount = cursor.rowcount
            print(f"成功插入 {rowcount} 条记录")
        finally:
            # Always release the cursor, even if executemany/commit raises.
            cursor.close()
    return rowcount


def get_current_date():
    """Return today's local date formatted as "YYYY-MM-DD"."""
    return datetime.now().strftime("%Y-%m-%d")


def update_grap_status(id):
    """Mark the mp_url_log row *id* as grabbed (has_grab = 1).

    Returns the number of rows updated (0 or 1).
    """
    sql = "update mp_url_log set has_grab = 1 where id = %s"
    connect = MysqlConnectInstance().get_connection()
    cursor = connect.cursor()
    try:
        cursor.execute(sql, (id,))
        connect.commit()
        rowcount = cursor.rowcount
        print(f"更新抓取条目状态 {rowcount}条")
    finally:
        # Release the cursor even when execute/commit raises.
        cursor.close()
    return rowcount


def get_waiting_grap():
    """Return mp_url_log rows still pending a grab.

    Selects every row not yet grabbed plus every row dated today (so
    today's rates get refreshed).  Each row is a tuple of
    (rate_date, url, has_grab, id).
    """
    query = "select rate_date, url , has_grab , id from mp_url_log where has_grab = 0 or rate_date = %s"
    connection = MysqlConnectInstance().get_connection()
    cursor = connection.cursor()
    cursor.execute(query, (get_current_date(),))
    pending_rows = cursor.fetchall()
    cursor.close()
    print(f"返回待抓取 {len(pending_rows)} 条")
    return pending_rows


def main():
    """Crawl every pending (or today's) article URL, parse its exchange
    rates, store them in MySQL, and flag successfully stored rows as grabbed.
    """
    # Rows come back as (rate_date, url, has_grab, id); rate_date doubles
    # as the title/date string written to the log table.
    for rate_date, url, has_grab, row_id in get_waiting_grap():
        exchange_rates = get_exchange_rate_from_mp_with_selenium(url)
        affected = write_data_to_mysql(rate_date, has_grab, exchange_rates)
        # Only mark the row as grabbed once data actually landed in MySQL.
        if affected > 0:
            update_grap_status(row_id)
        print(f'{rate_date}...done')
    print('抓取数据完成...')


# Run the crawl only when executed as a script (not on import).
if __name__ == '__main__':
    main()
