import traceback
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.edge.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from dvadmin.system.core.wos_config import *
from django.db import connection
import re
import time
import csv
import xlrd
import requests


def crawl_doc_info(args: dict):
    """Crawl Web of Science document info for one journal/year task.

    Expects ``args`` with keys: 'journal', 'year', 'agent_id',
    'agent_record_id', 'last_num' (count from the previous successful run).
    Marks the agent as running, performs the search + batched download, and
    always finalizes state via after_download() in the ``finally`` block.
    """
    journal = args['journal']
    year = args['year']
    agent_id = args['agent_id']
    agent_record_id = args['agent_record_id']
    last_num = args['last_num']

    # request headers / POST payload templates from wos_config
    headers = post_headers
    xls_post_data = post_data
    # buffered log rows, flushed by write_logs()
    log_sqls = []
    # total semantics: -1 = initial, 0 = crawl succeeded with zero hits,
    # -n = crawl failed but the previous successful count was n
    total = -1
    # per-year table; identifier cannot be a bind parameter, but 'year'
    # comes from our own task args, not end-user input
    db_table = f'document_{year}'

    # flag the agent as running (parameterized to avoid SQL injection)
    cursor = connection.cursor()
    cursor.execute("UPDATE agent SET is_running=TRUE WHERE id=%s", [agent_id])
    try:
        # build the advanced-search query string
        query = get_query(journal, year)

        # simulate the search to obtain the session id, query id and hit count
        headers['X-1p-Wos-Sid'], xls_post_data['parentQid'], total = \
            get_ids_and_total(query, agent_record_id, log_sqls)

        # nothing new since the last run -> skip the download
        if last_num >= total:
            log_info(f"[{journal}] 于 [{year}] 年的文献信息未更新，无需下载", agent_record_id, log_sqls)
            return
        start_index = last_num + 1

        # fetch document info in batches
        batch_download(start_index, total, xls_post_data, headers, db_table, agent_id, agent_record_id, log_sqls)
    except Exception as e:
        traceback.print_exc()
        log_fatal(f"异常信息: {str(e)}", agent_record_id, log_sqls)
    finally:
        connection.commit()
        cursor.close()
        # singleton-style crawler: clear per-run logs/state after the download
        after_download(total, agent_id, year, log_sqls)


# Returns (sid, qid, total) for the given advanced-search query.
def get_ids_and_total(query: str, record_id: int, log_sqls: list[str]):
    """Drive a headless browser through a WoS advanced search and scrape
    the session id (SID), query id (QID) and total result count.

    On a failed results page the agent_record row is flagged with
    last_query_num=-1 before re-raising. The browser is always quit.
    """
    browser = get_browser()

    # selectors are hoisted so the literal is not duplicated below
    submit_selector = '.mat-focus-indicator.search.mat-flat-button.mat-button-base.mat-primary.ng-star-inserted'
    submit_button_locator = (By.CSS_SELECTOR, submit_selector)
    qid_selector = '.small-info-text.font-size-13'
    qid_div_locator = (By.CSS_SELECTOR, qid_selector)

    try:
        url = 'https://webofscience.clarivate.cn/wos/alldb/advanced-search'

        # implicit wait so element lookups retry while the page loads
        browser.implicitly_wait(5)
        browser.get(url)

        log_info('开始模拟检索行为', record_id, log_sqls)

        # wait until the advanced-search page is interactive
        WebDriverWait(browser, 5, 1) \
            .until(expected_conditions.element_to_be_clickable(submit_button_locator))
        log_info('高级检索页 加载成功', record_id, log_sqls)

        # type the query into the textarea and submit to reach the results page
        log_info('正在准备跳转至检索结果页...', record_id, log_sqls)
        input_elem = browser.find_element(By.ID, 'advancedSearchInputArea')
        input_elem.send_keys(query)

        search_btn = browser.find_element(By.CSS_SELECTOR, submit_selector)
        # click via JS: a native .click() can fail when the button is obscured
        browser.execute_script("arguments[0].click()", search_btn)
    except Exception as e:
        # search page failed to load (site unreachable, layout change, ...)
        log_fatal('检索页加载失败 请确认可以正常访问 Web of Science 网站', record_id, log_sqls)
        browser.quit()
        raise e

    try:
        # wait for the results page to render
        WebDriverWait(browser, 5, 1) \
            .until(expected_conditions.presence_of_element_located(qid_div_locator))
        log_info('检索结果页 加载成功', record_id, log_sqls)

        # scrape the SID out of the inline sessionData script in <head>
        # (it is later sent as the X-1p-Wos-Sid request header)
        sid = re.findall(r'(?<="SID":")[^,]{29}(?=")', browser.find_element(By.XPATH, '/html/head')
                         .get_attribute('innerHTML'))[0]
        log_info(f'SID:\t{sid}', record_id, log_sqls)

        # QID identifies this search server-side,
        # e.g. e5961b8c-26cf-4488-a481-b738e40ad10b-012318f5d1
        qid_div = browser.find_element(By.CSS_SELECTOR, qid_selector)
        qid = qid_div.get_attribute('data-ta-search-info-qid')
        log_info(f'QID:\t{qid}', record_id, log_sqls)

        # total result count, e.g. "1,234" -> 1234
        total_span = browser.find_element(By.CSS_SELECTOR, '.brand-blue')
        total = int(total_span.text.replace(',', ''))
        log_info(f'检索结果总数:\t{total}', record_id, log_sqls)
        log_info('模拟结束', record_id, log_sqls)
        return sid, qid, total
    except Exception as e:
        # no results page -> most likely the query matched nothing
        log_fatal('跳转检索结果页失败 可能原因为无检索结果', record_id, log_sqls)

        # flag this crawl record as "no data" (parameterized query)
        cursor = connection.cursor()
        cursor.execute("UPDATE agent_record SET last_query_num=-1 WHERE id=%s", [record_id])
        connection.commit()
        cursor.close()
        raise e
    finally:
        browser.quit()


def batch_download(start_index: int,
                   total: int,
                   xls_post_data: dict,
                   headers: dict,
                   db_table: str,
                   agent_id: int,
                   record_id: int,
                   log_sqls: list[str]):
    """Download document info in batches of up to 1000 records.

    Each batch is recorded in `crawl_record` (success or failure), logs are
    flushed, and a 1-second delay is inserted between batches to avoid an
    IP ban. A failing batch does not abort the remaining batches.
    """
    # the first batch keeps the xls header row; later batches skip it
    first = True

    cursor = connection.cursor()
    for i in range(start_index, total, 1000):
        # inclusive index range for this batch
        _from, _to = i, i + 999

        success = True
        cost_time = 0
        try:
            log_info(f'开始下载下标范围: {_from}-{_to} 的 xls 文献信息', record_id, log_sqls)
            start = int(time.time_ns() / 1e6)
            download_doc_info(_from, _to, first, headers, xls_post_data, db_table, agent_id, record_id, log_sqls)
            cost_time = int(time.time_ns() / 1e6) - start
        except Exception as e:
            success = False
            traceback.print_exc()
            # pass the raw message: add_log escapes quotes itself, so the
            # previous manual replace('\'', '\\\'') double-escaped the text
            log_error(f'异常信息: {e}', record_id, log_sqls)

        # record this batch (parameterized to avoid SQL injection)
        cursor.execute(
            "INSERT INTO `crawl_record` VALUE (DEFAULT,%s,%s,%s,%s,%s,%s)",
            [record_id, int(time.time()), cost_time, _from, _to,
             '成功' if success else '失败']
        )

        # flush buffered log rows and commit the batch
        write_logs(cursor, log_sqls)
        connection.commit()
        first = False

        # throttle between batches
        log_info(f'下载延迟 1 秒', record_id, log_sqls)
        time.sleep(1)

    cursor.close()


def download_doc_info(_from: int,
                      _to: int,
                      first: bool,
                      headers: dict,
                      xls_post_data: dict,
                      db_table: str,
                      agent_id: int,
                      record_id: int,
                      log_sqls: list[str]):
    """POST the xls export request for records [_from, _to], stream the file
    to disk, then convert it to csv and insert the rows into `db_table`.

    Raises Exception with the server's status code and error message when
    the HTTP response is not OK.
    """
    # index range for the export payload (the API expects strings)
    xls_post_data['markFrom'] = str(_from)
    xls_post_data['markTo'] = str(_to)

    response = requests.post(url=xls_post_url,
                             headers=headers,
                             json=xls_post_data,
                             stream=True)
    log_info(f'下载请求响应成功', record_id, log_sqls)

    if not response.ok:
        log_error(f'{_from}-{_to} 范围的数据请求失败', record_id, log_sqls)
        # parse the body exactly once; fall back to raw text when it isn't
        # JSON (the previous double response.json() call could raise here)
        try:
            err_msg = response.json().get('errm', response.text)
        except ValueError:
            err_msg = response.text
        raise Exception(f"错误代码: {response.status_code}\t"
                        f"错误信息: {err_msg}")

    log_info('请求成功, 开始读取 xls 文件', record_id, log_sqls)

    # target path: <file_save_path>/xls/<agent_id>/<record_id>/<from>_<to>.xls
    xls_dir_path = file_save_path + f'/xls/{agent_id}/{record_id}'
    xls_file_path = f'{xls_dir_path}/{_from}_{_to}.xls'

    # exist_ok avoids the race between an exists() check and makedirs()
    os.makedirs(xls_dir_path, exist_ok=True)

    # stream the body to disk in 1 KiB chunks
    with open(xls_file_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    log_info(f'xls 文件下载完成, 文件名: {_from}_{_to}.xls', record_id, log_sqls)

    # convert to csv and persist to the database
    write_to_csv_and_db(xls_file_path, first, db_table, agent_id, record_id, log_sqls)


def write_to_csv_and_db(xls_path: str,
                        first: bool,
                        db_table: str,
                        agent_id: int,
                        record_id: int,
                        log_sqls: list[str]):
    """Convert the downloaded xls to csv (header row only on the first
    batch, append mode otherwise) and bulk-insert the data rows into
    `db_table` via a parameterized executemany.
    """
    # ensure the csv output directory exists
    csv_dir_path = file_save_path + f'/csv/{agent_id}'
    os.makedirs(csv_dir_path, exist_ok=True)

    # read the first sheet of the xls export
    wb = xlrd.open_workbook(xls_path)
    sheet = wb.sheet_by_index(0)

    rows = []
    with open(f'{csv_dir_path}/{record_id}.csv', 'w' if first else 'a', newline='', encoding='utf-8') as f:
        csv_writer = csv.writer(f)
        # only the first batch writes the header row
        if first:
            csv_writer.writerow(sheet.row_values(0))

        for row in range(1, sheet.nrows):
            raw = sheet.row_values(row)
            csv_writer.writerow(raw)
            # NOTE(review): the last column is dropped before insert —
            # presumably it has no counterpart in the table schema; confirm
            rows.append([str(field) for field in raw[:-1]])

    # a header-only sheet previously produced malformed SQL; just skip it
    if not rows:
        return

    # parameterized bulk insert: no hand-rolled quote escaping needed.
    # db_table is built internally as document_<year>, so interpolating the
    # identifier (which cannot be a bind parameter) is acceptable here.
    placeholders = ','.join(['%s'] * len(rows[0]))
    cursor = connection.cursor()
    cursor.executemany(f'INSERT INTO {db_table} VALUES (DEFAULT,{placeholders})', rows)
    log_info('文献信息写入数据库成功', record_id, log_sqls)

    connection.commit()
    cursor.close()


def after_download(total: int, agent_id: int, year: str, log_sqls: list[str]):
    """Finalize a crawl run: clear the agent's running flag, reconcile the
    stored result count, flush remaining logs and commit.

    total >= 0 -> store it as the new last_query_num; total < 0 (failure)
    -> negate a previously positive last_query_num to mark the failure
    while preserving the old count. All queries are parameterized.
    """
    cursor = connection.cursor()
    # is_running=FALSE marks the agent as finished
    cursor.execute("UPDATE agent SET is_running=FALSE WHERE id=%s", [agent_id])
    if total >= 0:
        cursor.execute(
            "UPDATE agent_record SET last_query_num=%s WHERE agent_id=%s and year=%s",
            [total, agent_id, year])
    else:
        cursor.execute(
            "SELECT last_query_num FROM agent_record WHERE agent_id=%s and year=%s",
            [agent_id, year])
        # fetchone() returns None when no row matched
        row = cursor.fetchone()
        last_query_num = row[0] if row is not None else -1
        if last_query_num > 0:
            cursor.execute(
                "UPDATE agent_record SET last_query_num=-last_query_num WHERE agent_id=%s and year=%s",
                [agent_id, year])
        # an already non-positive count is left untouched

    # flush buffered log rows
    write_logs(cursor, log_sqls)

    connection.commit()
    cursor.close()


def log_info(info: str, record_id: int, log_sqls: list[str]):
    """Buffer an INFO-level log entry for `record_id`."""
    add_log(log_states['INFO'], info, record_id, log_sqls)


def log_warn(info: str, record_id: int, log_sqls: list[str]):
    """Buffer a WARN-level log entry for `record_id`."""
    add_log(log_states['WARN'], info, record_id, log_sqls)


def log_error(info: str, record_id: int, log_sqls: list[str]):
    """Buffer an ERROR-level log entry for `record_id`."""
    add_log(log_states['ERROR'], info, record_id, log_sqls)


def log_fatal(info: str, record_id: int, log_sqls: list[str]):
    """Buffer a FATAL-level log entry for `record_id`."""
    add_log(log_states['FATAL'], info, record_id, log_sqls)


def write_logs(cursor, log_sqls: list[str]):
    """Flush the buffered log rows into crawl_log and clear the buffer.

    Entries in `log_sqls` are pre-formatted "(...)" VALUES tuples produced
    by add_log(), which already escapes single quotes.
    """
    # nothing buffered -> nothing to do
    if not log_sqls:
        return

    # single multi-row INSERT for all buffered entries
    cursor.execute("INSERT INTO crawl_log VALUES " + ",".join(log_sqls))

    # reset the shared buffer in place
    log_sqls.clear()


def add_log(status: str, info: str, record_id: int, log_sqls: list[str]):
    """Append a formatted "(...)" VALUES tuple to `log_sqls` and echo the
    message to stdout with a timestamp.

    The tuple layout matches crawl_log: (DEFAULT, status, info, record_id,
    unix_timestamp).
    """
    # unix timestamp in seconds
    timestamp = int(time.time())

    # the DB info column is limited to 512 chars: truncate BEFORE escaping
    # so the cut can never land inside an escape sequence (a trailing bare
    # backslash would corrupt the SQL string literal)
    if len(info) > 512:
        info = info[:509] + '...'

    # escape single quotes so they don't terminate the SQL string literal
    info = info.replace("'", "\\'")

    log_sqls.append(f"(DEFAULT,'{status}','{info}',{record_id},{timestamp})")
    print(f"{datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')} [{status}] {info}")


def get_query(journal: str, year: str):
    """Build the advanced-search query: SO=(journal) combined with PY=(year)."""
    journal_term = add_to_query('', 'AND', 'SO', journal)
    return add_to_query(journal_term, 'AND', 'PY', year)


# Append one `field=(keyword)` term to an advanced-search query string.
def add_to_query(query: str, boolean: str, field: str, keyword: str) -> str:
    """Return `query` extended with `boolean field=(keyword)`; an empty or
    None `query` yields just the new term."""
    term = f'{field}=({keyword})'
    if not (query and query.strip()):
        return term
    return f'({query}){boolean} {term}'


def get_browser(headless=True, disable_gpu=False):
    """Create an Edge webdriver using the binary path from wos_config.

    headless: hide the browser window; disable_gpu: pass --disable-gpu.
    """
    opts = Options()
    if headless:
        opts.add_argument("--headless")
    if disable_gpu:
        opts.add_argument("--disable-gpu")
    # path to the Edge executable (wos_config.edge_path)
    opts.binary_location = edge_path
    return webdriver.Edge(options=opts)


def download_delay(delay: int = 1):
    """Sleep for `delay` seconds between download requests (default 1)."""
    time.sleep(delay)