import os.path
import subprocess
import time

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

import ai
from ai.utils import utils_file


def get_dropdown_links(the_url, dropdown_css_selector, link_css_selector):
    """Click every <option> of a dropdown on a page and print the matching link's href.

    Drives a real Chrome browser with Selenium so JavaScript-rendered content is
    available, selects each option of the dropdown located by
    ``dropdown_css_selector``, and after each selection parses the rendered DOM
    with BeautifulSoup to extract the ``href`` of the element matched by
    ``link_css_selector``.

    :param the_url: URL of the page to open.
    :param dropdown_css_selector: CSS selector locating the <select> element.
    :param link_css_selector: CSS selector locating the link whose href is wanted.
    """
    # Requires Chrome plus a matching ChromeDriver on PATH.
    driver = webdriver.Chrome()
    try:
        driver.get(the_url)

        # Fixed sleep for initial page load; adjust to the site's actual speed.
        time.sleep(2)

        dropdown = driver.find_element(By.CSS_SELECTOR, dropdown_css_selector)

        for option in dropdown.find_elements(By.TAG_NAME, 'option'):
            option.click()

            # Wait for the page to refresh its content after the selection.
            time.sleep(2)

            # Parse the browser's CURRENT DOM (not the initial server response),
            # so JavaScript-driven updates are visible to BeautifulSoup.
            current_soup = BeautifulSoup(driver.page_source, 'html.parser')

            link = current_soup.select_one(link_css_selector)
            if link:
                href_value = link.get('href')
                print(f"Option: {option.text}, Link Href: {href_value}")
    finally:
        # Always release the browser, even if a selector fails mid-loop.
        driver.quit()


# Browser-like request headers sent when downloading dataset archives.
# NOTE: "br" (Brotli) is deliberately NOT advertised in Accept-Encoding:
# requests cannot decode Brotli unless the optional brotli package is
# installed, so a server answering with Brotli-encoded bytes would be
# written to disk undecoded, corrupting the saved .tar.gz file.
header = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "same-origin",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "Windows",
}


def download_tar_gz_file_from_url(url, output_file_name):
    """Download *url* to *output_file_name*, creating parent directories as needed.

    Streams the response body to disk in 1 MiB chunks so multi-gigabyte
    archives do not have to fit in memory, and raises for HTTP error statuses
    instead of silently saving an error page as the archive.

    :param url: direct download URL of the archive.
    :param output_file_name: destination path for the downloaded file.
    :raises requests.HTTPError: if the server responds with an error status.
    """
    with requests.get(url, headers=header, stream=True) as resp:
        resp.raise_for_status()
        utils_file.makedir_for_file_or_dir(output_file_name)
        with open(output_file_name, "wb") as f:
            for chunk in resp.iter_content(chunk_size=1 << 20):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)


def get_download_url_map():
    """Scrape commonvoice.mozilla.org and save a {language: download info} map.

    Fills in the e-mail field and ticks the two consent checkboxes (both are
    required before download links are enabled), then walks every language in
    the dataset dropdown and records, for each one, the signed download URL,
    archive size, total hours and validated hours.  The result is written to
    ./output/download_url_map.json.

    NOTE: the CSS selectors below are tied to the site's current DOM layout
    and will break if the page structure changes.
    """
    url = "https://commonvoice.mozilla.org/zh-CN/datasets"
    output_file_path = "./output/download_url_map.json"
    utils_file.makedir_for_file_or_dir(output_file_path)
    options = Options()
    # options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
    driver = webdriver.Chrome(options=options)
    try:
        driver.get(url)
        driver.implicitly_wait(0.5)
        # E-mail address required by the form before the download link appears.
        input_object = driver.find_element(By.CSS_SELECTOR,
                                           "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div:nth-child(3) > div > div:nth-child(1) > label.labeled-form-control.for-input > input[type=email]")
        input_object.send_keys("3349495419@qq.com")
        driver.implicitly_wait(0.5)
        time.sleep(0.5)
        # First consent checkbox.
        checkbox1 = driver.find_element(By.CSS_SELECTOR,
                                        "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div:nth-child(3) > div > div:nth-child(1) > label:nth-child(2) > span.checkbox-container > input[type=checkbox]")
        checkbox1.click()
        driver.implicitly_wait(0.5)
        time.sleep(0.5)
        # Second consent checkbox.
        checkbox2 = driver.find_element(By.CSS_SELECTOR,
                                        "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div:nth-child(3) > div > div:nth-child(1) > label:nth-child(3) > span.checkbox-container > input[type=checkbox]")
        checkbox2.click()
        driver.implicitly_wait(0.5)
        time.sleep(0.5)
        res_dic = {}
        dropdown = driver.find_element(By.CSS_SELECTOR,
                                       "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div.input-row > label > div > select")
        for option in dropdown.find_elements(By.TAG_NAME, 'option'):
            # Select the language; the page then refreshes the info table.
            option.click()
            country_name = option.text
            # Give the page time to load data for the new selection.
            time.sleep(1.5)
            a_main = driver.find_element(By.CSS_SELECTOR,
                                         "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div:nth-child(3) > div > div:nth-child(2) > a")
            url_download = a_main.get_attribute('href')
            data_td_size = driver.find_element(By.CSS_SELECTOR,
                                               "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div:nth-child(3) > table > tbody > tr.selected > td:nth-child(3)")
            size = data_td_size.text
            data_td_time = driver.find_element(By.CSS_SELECTOR,
                                               "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div:nth-child(3) > table > tbody > tr.selected > td:nth-child(4)")
            time_all = data_td_time.text + 'h'
            data_td_time_valid = driver.find_element(By.CSS_SELECTOR,
                                                     "#content > div > div > div.dataset-info > div.top > div.dataset-corpus-download > div > div:nth-child(3) > table > tbody > tr.selected > td:nth-child(5)")
            time_valid = data_td_time_valid.text + 'h'
            res_dic[country_name] = {"url": url_download, "size": size, "time_all": time_all, "time_valid": time_valid}
            print(res_dic[country_name])
        utils_file.write_dict_to_json(res_dic, output_file_path)
    finally:
        # Original code leaked the browser process; always release it.
        driver.quit()


def download_by_wget(url, output_dir, country):
    """Download *url* to ``<output_dir>/<country>.tar.gz`` using wget.

    wget is run with ``-c`` (resume an interrupted download), a 5-second read
    timeout and unlimited retries.  The command is executed via subprocess
    with an argument list (no shell), so URLs containing shell metacharacters
    (the signed Common Voice URLs contain '&') cannot break the command or
    inject shell code — the original ``os.system`` string version could.

    :param url: direct download URL.
    :param output_dir: directory the archive is saved into.
    :param country: language/country label used as the file name stem.
    """
    file_name = utils_file.join_path(output_dir, country) + '.tar.gz'
    # -O pins the full output path, so the original redundant -P flag is dropped.
    command = ["wget", "-c", "--read-timeout=5", "--tries=0", "-O", file_name, url]
    status = subprocess.run(command).returncode
    if status == 0:
        print(f"Download successful,url:{url}")
    else:
        print(f"Download failed,url:{url}")


def download_by_gxldownloader(downloader, url, country):
    """Delegate the download of *url* to the given downloader object.

    The downloader saves the archive with extension 'tar.gz' under the name
    *country*; this wrapper exists so it can be queued into a thread pool.
    """
    downloader.download(url, 'tar.gz', country)


def filter_file(json_file_path, threshold_mb=500):
    """Split the download map into datasets smaller / not smaller than *threshold_mb* MB.

    Entries whose size string ends in 'MB' and parses below the threshold are
    written to ./output/file_litte500mb.json; everything else (including all
    GB-sized entries) goes to ./output/file_big500mb.json.

    :param json_file_path: path to the {language: info} json produced by
        get_download_url_map(); each info dict must have a 'size' string.
    :param threshold_mb: size cutoff in megabytes (default 500, matching the
        original hard-coded behavior).
    """
    res = {}
    other = {}
    dic = utils_file.load_json_file(json_file_path)
    for name, info in dic.items():
        size = info['size']
        if size.endswith('MB'):
            # Strip the unit and thousands separators.  Slicing off the last
            # two chars handles both "123 MB" and "123MB"; the original
            # replace(" MB", "") crashed with ValueError on the latter.
            size_num = float(size[:-2].replace(",", "").strip())
            if size_num < threshold_mb:
                res[name] = info
                continue
        other[name] = info
    utils_file.write_dict_to_json(res, './output/file_litte500mb.json')
    utils_file.write_dict_to_json(other, './output/file_big500mb.json')


def from_url_map_get_file(json_file_path, output_dir):
    """Download every dataset listed in the URL-map json into *output_dir*.

    One download task is queued per language via the project thread pool, all
    sharing a single GxlDownloader rooted at *output_dir*; the pool is started
    once every task has been added.
    """
    logger = ai.AiConstant.AI_LOGGER('./output/log.log')
    url_map = utils_file.load_json_file(json_file_path)
    from ai.thread import my_thread
    pool = my_thread.MyThreadPool()
    downloader = utils_file.GxlDownloader(output_dir)
    for language, info in url_map.items():
        logger.info('开始下载如下语言:' + language)
        # Pre-create a per-language directory (used later as extraction target).
        per_language_dir = os.path.join(output_dir, language)
        utils_file.makedir_for_file_or_dir(per_language_dir)
        # download_by_wget(info['url'], per_language_dir, language)
        pool.add_thread(download_by_gxldownloader, [downloader, info['url'], language])
    pool.start()


def get_total_size(json_file_path):
    """Sum all dataset sizes in the URL-map json, print and return the total in MB.

    Size strings are normalized to megabytes ('GB' x 1024, 'MB' as-is,
    'kB' / 1024); entries with any other unit are skipped.
    """
    entries = utils_file.load_json_file(json_file_path)
    total_size = 0
    for info in entries.values():
        # Drop thousands separators before parsing the number.
        raw = info['size'].replace(',', '')
        if raw.endswith('GB'):
            mb = float(raw.replace('GB', '')) * 1024
        elif raw.endswith('MB'):
            mb = float(raw.replace('MB', ''))
        elif raw.endswith('kB'):
            mb = float(raw.replace('kB', '')) / 1024
        else:
            # Unknown unit — skip this entry.
            continue
        total_size += mb
    print(total_size, "MB", total_size / 1024, "GB")
    return total_size


if __name__ == "__main__":
    """"""
    # Entry point.  The commented-out calls below are the other pipeline steps
    # (scrape the URL map, compute total size, filter by size); only the bulk
    # download step is currently active.
    # from_url_map_get_file("./output/download_url_map.json", "/home/work_nfs7/data/common_voice/")
    # get_total_size("./output/download_url_map.json")
    # get_download_url_map()
    # filter_file('./output/download_url_map.json')
    from_url_map_get_file('./output/download_url_map.json', '/home/backup_nfs5/xlgeng/asr_data/common_voice/')
    # from_url_map_get_file('./output/download_url_map.json', './output/common_voice/')
    # gxl_dler = utils_file.GxlDownloader('./output/')
    # gxl_dler.download(
    #     "https://storage.googleapis.com/common-voice-prod-prod-datasets/cv-corpus-15.0-2023-09-08/cv-corpus-15.0-2023-09-08-ab.tar.gz?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gke-prod%40moz-fx-common-voice-prod.iam.gserviceaccount.com%2F20231029%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20231029T090150Z&X-Goog-Expires=43200&X-Goog-SignedHeaders=host&X-Goog-Signature=32782f7da6969e2100f955e67a5a2cc19c2772a73d8ecf8280b547e307c5d4bda7ee78474a68f463297ce640bbe190cba7edc2dd2df978474638c9dc454f872f1666b34bc53ea866a38e0f6c5d427efa12cc5ec4e8b08b8bbc7cd4a44560da9e6e2074fdb8c03e75b15a9719f660240e3a3adcbb311442d8e6b246f2db85461764a33b5bc80b80a699e8acbedf26571aae5d6833c12f0df6ae08e5cb13db7075d4f8fc9ce6103fe7c9eae16d98134a7db2e102442599261f26337f6318c1be3f24d75f1dd254a202b1f8504634c9f3be46eaa6ad2652b1088a5809943c4472babccd4faa139aaab6c3a991fc229f5dff78f68c5845aed01fca23c40e40c0fd17",
    #     '.tar.gz', '中国')
