import threading
import traceback
from datetime import datetime

from selenium import webdriver
from selenium.webdriver.edge.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions

# NOTE: the star-import must stay ahead of the imports below -- it supplies the
# config names used throughout this module (post_headers, post_data,
# xls_post_url, file_save_path, edge_path, log_states) and previously also
# provided `os` implicitly; `os` is now imported explicitly.
from dvadmin.system.core.wos_config import *
from django.db import connection

import os
import re
import time
import csv
import xlrd
import requests
#################################################### This file is DEPRECATED ####################################################

# Crawler thread class (deprecated)
class DownloadThread(threading.Thread):
    """DEPRECATED crawler thread for Web of Science journal metadata.

    One thread handles a single (journal, year) pair: it drives a headless
    Edge browser through the WoS advanced-search page to capture a session SID
    and a query QID, then downloads the search results in batches of 1000 as
    .xls exports, converts them to CSV and bulk-inserts the rows into the
    per-year ``document_<year>`` table.  Crawl attempts and log rows are
    persisted in the ``crawl_record`` / ``crawl_log`` tables.
    """

    # Class-level defaults are kept only for backward compatibility with code
    # that reads them off the class; the working state is set per instance in
    # __init__.  (The original kept the mutable dicts/list here, which made
    # them shared between every instance/thread -- a contamination bug.)
    # Status: 0 = idle, 1 = running
    status = 0
    # Request headers (from wos_config)
    headers = post_headers
    # POST payload for the xls export endpoint (from wos_config)
    xls_post_data = post_data
    # Browser instance, created lazily to avoid repeated startups
    browser = None
    # Target data table name
    db_table = None
    # Pending log rows awaiting flush
    log_sqls = []
    # Journal ID
    agent_id = None
    # 1-based index of the first record to fetch
    index_start = 1
    # Total number of search results (-1 = unknown / search failed)
    total = -1

    def __init__(self, args: dict):
        """Create the download thread.

        :param args: dict with keys 'journal', 'year', 'agent_id',
                     'agent_record_id' and 'last_num' (records already stored
                     from a previous crawl of this journal/year).
        """
        super().__init__()
        # Journal name, year and the per-year document table.
        self.journal = args['journal']
        self.year = args['year']
        # NOTE(review): `year` is interpolated into the table name; it is
        # validated as an in-range integer in before_download() before any
        # SQL that uses db_table runs.
        self.db_table = f'document_{self.year}'
        self.agent_id = args['agent_id']
        self.agent_record_id = args['agent_record_id']
        self.last_num = args['last_num']
        # Per-instance copies of the shared config dicts: the crawler writes
        # the session SID and QID into them, which must not leak across
        # concurrently running threads.
        self.headers = dict(post_headers)
        self.xls_post_data = dict(post_data)
        # Per-instance mutable state (previously shared class attributes).
        self.status = 0
        self.browser = None
        self.log_sqls = []
        self.index_start = 1
        self.total = -1
        # Daemon thread: do not keep the interpreter alive on shutdown.
        self.daemon = True

    def run(self):
        """Thread entry point: validate, download, then always clean up."""
        # Validate parameters and mark the thread as running.
        self.before_download()

        try:
            self.download()
        except Exception as e:
            traceback.print_exc()
            self.log_fatal(f"异常信息: {str(e)}")
        finally:
            # The instance is reused as a singleton downloader, so logs and
            # state must be flushed and cleared after every run.
            self.after_download()

    def download(self):
        """Run the advanced search and fetch any records newer than last_num."""
        # Build the advanced-search query string (journal + publication year).
        query = self.get_query()

        # Simulate the search in a browser to obtain the QID and total count.
        print(f'debug--{query}')
        self.get_qid_and_num(query)

        # Nothing new since the previous crawl?
        if self.last_num >= self.total:
            self.log_info(f"[{self.journal}] 于 [{self.year}] 年的文献信息未更新，无需下载")
            return
        else:
            # Resume right after the last record already stored.
            self.index_start = self.last_num + 1

        # Fetch the remaining records in batches.
        self.batch_download()

    def get_qid_and_num(self, query: str):
        """Drive the browser through an advanced search to capture SID/QID/total.

        :param query: advanced-search query string
        :raises Exception: when the search page fails to load, or the search
                           returns no results (the agent record is then
                           flagged with last_query_num = -1).
        """
        try:
            self.browser = self.get_browser()

            url = 'https://webofscience.clarivate.cn/wos/alldb/advanced-search'

            # Implicit wait applies to every element lookup below.
            self.browser.implicitly_wait(5)
            self.browser.get(url)

            # Locators for the submit button and the QID-carrying div.
            submit_button_locator = (
                By.CSS_SELECTOR,
                '.mat-focus-indicator.search.mat-flat-button.mat-button-base.mat-primary.ng-star-inserted')
            qid_div_locator = (By.CSS_SELECTOR, '.small-info-text.font-size-13')
            self.log_info('开始模拟检索行为')

            # Wait until the advanced-search page is interactive.
            WebDriverWait(self.browser, 5, 1) \
                .until(expected_conditions.element_to_be_clickable(submit_button_locator))
            self.log_info('高级检索页 加载成功')

            # Type the query into the search textarea.
            self.log_info('正在准备跳转至检索结果页...')
            input_elem = self.browser.find_element(By.ID, 'advancedSearchInputArea')
            input_elem.send_keys(query)

            # The lookup doubles as a presence check; the click itself was
            # deliberately disabled in the original code.
            self.browser.find_element(
                By.CSS_SELECTOR,
                '.mat-focus-indicator.search.mat-flat-button.mat-button-base.mat-primary.ng-star-inserted')
            # search_btn.click()
        except Exception as e:
            # Search page failed to load.
            self.log_fatal('检索页加载失败 请确认可以正常访问 Web of Science 网站')
            raise e

        try:
            # Wait until the results page is present.
            WebDriverWait(self.browser, 5, 1) \
                .until(expected_conditions.presence_of_element_located(qid_div_locator))
            self.log_info('检索结果页 加载成功')

            # Extract the session SID from the inline sessionData script; it
            # becomes a required request header for the export endpoint.
            sid = re.findall(r'(?<="SID":")[^,]+(?=")', self.browser.find_element(By.XPATH, '/html/head/script[4]')
                             .get_attribute('innerHTML'))[0]
            self.headers['X-1p-Wos-Sid'] = sid
            self.log_info(f'SID:\t{sid}')

            # The QID identifies this particular search on the server side.
            qid_div = self.browser.find_element(By.CSS_SELECTOR, '.small-info-text.font-size-13')
            qid = qid_div.get_attribute('data-ta-search-info-qid')
            self.xls_post_data['parentQid'] = qid
            self.log_info(f'QID:\t{qid}')

            # Total number of hits, rendered like "1,234".
            total_span = self.browser.find_element(By.CSS_SELECTOR, '.brand-blue')
            self.total = int(total_span.text.replace(',', ''))
            self.log_info(f'检索结果总数:\t{self.total}')
            self.log_info('模拟结束')
        except Exception as e:
            # No results page -- most likely the query matched nothing.
            self.log_fatal('跳转检索结果页失败 可能原因为无检索结果')

            # Flag this agent record as having no data (parameterised query;
            # the original interpolated the id straight into the SQL string).
            with connection.cursor() as cursor:
                cursor.execute(
                    "UPDATE agent_record SET last_query_num=-1 WHERE id=%s",
                    [self.agent_record_id],
                )
            connection.commit()
            raise e

    def batch_download(self):
        """Download all records in [index_start, total] in batches of 1000."""
        # True only for the very first batch, which also writes the CSV header.
        # NOTE(review): when resuming (index_start > 1) this still opens the
        # CSV in 'w' mode and truncates it -- confirm that is intended.
        first = True

        cursor = connection.cursor()
        # The end bound must be inclusive: the original used
        # range(index_start, total, 1000), which silently skipped the final
        # record(s) whenever `total` was not hit exactly (e.g. total=2001 only
        # fetched 1-2000), hence `total + 1`.
        for i in range(self.index_start, self.total + 1, 1000):
            # Index window for this batch.
            _from, _to = i, i + 999

            # Throttle to avoid getting the IP banned.
            self.download_delay()

            success = True
            cost_time = 0
            try:
                self.log_info(f'开始下载下标范围: {_from}-{_to} 的 xls 文献信息')
                start = int(time.time_ns() / 1e6)
                self.download_doc_info(_from, _to, first)
                cost_time = int(time.time_ns() / 1e6) - start
            except Exception as e:
                success = False
                traceback.print_exc()
                # No manual quote-escaping needed: logs are parameterised now.
                self.log_error(f'异常信息: {e}')

            # Persist the crawl attempt (parameterised; was string-built SQL).
            cursor.execute(
                "INSERT INTO `crawl_record` "
                "VALUES (DEFAULT,%s,%s,%s,%s,%s,%s)",
                [self.agent_id, int(time.time()), cost_time, _from, _to,
                 '成功' if success else '失败'],
            )

            # Flush queued log rows with the same cursor.
            self.write_logs(cursor)

            # Commit record + logs together.
            connection.commit()
            first = False

        cursor.close()

    # Request the export endpoint with the captured QID to fetch one batch.
    def download_doc_info(self, _from: int, _to: int, first: bool):
        """POST the export request for one index range and store the result.

        :param _from: first record index (1-based, inclusive)
        :param _to: last record index (inclusive)
        :param first: True when this is the first batch of the run
        :raises Exception: when the export endpoint answers with an error
        """
        # Record-index window for the export payload.
        self.xls_post_data['markFrom'] = str(_from)
        self.xls_post_data['markTo'] = str(_to)

        # Stream the xls export back from the server.
        response = requests.post(url=xls_post_url,
                                 headers=self.headers,
                                 json=self.xls_post_data,
                                 stream=True)
        self.log_info('下载请求响应成功')

        if response.ok:
            self.log_info('请求成功, 开始读取 xls 文件')

            # Target path: <save>/xls/<journal>/<year>/<from>_<to>.xls
            xls_dir_path = file_save_path + f'/xls/{self.journal}/{self.year}'
            xls_file_path = f'{xls_dir_path}/{_from}_{_to}.xls'

            # Create the directory tree if needed.
            os.makedirs(xls_dir_path, exist_ok=True)

            # Write the response body to disk in chunks.
            with open(xls_file_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                self.log_info(f'xls 文件下载完成, 文件名: {_from}_{_to}.xls')

            # Convert to CSV and insert into the database.
            self.write_to_csv_and_db(xls_file_path, first)
        else:
            self.log_error(f'{_from}-{_to} 范围的数据请求失败')
            # Parse the error body exactly once; it is not guaranteed to be
            # JSON (the original called response.json() twice and could raise
            # a decode error that masked the real failure).
            try:
                err_msg = response.json().get('errm', response.text)
            except ValueError:
                err_msg = response.text
            raise Exception(f"错误代码: {response.status_code}\t"
                            f"错误信息: {err_msg}")

    # Convert the xls batch to CSV and bulk-insert it into the database.
    def write_to_csv_and_db(self, xls_path, first):
        """Append the xls rows to the year's CSV and insert them into db_table.

        :param xls_path: path of the downloaded .xls file
        :param first: True for the first batch (CSV opened in 'w' mode and the
                      header row is written; later batches append)
        """
        # Create the CSV directory if needed.
        csv_dir_path = file_save_path + f'/csv/{self.journal}'
        os.makedirs(csv_dir_path, exist_ok=True)

        # Read the xls workbook via xlrd.
        wb = xlrd.open_workbook(xls_path)
        sheet = wb.sheet_by_index(0)

        with open(f'{csv_dir_path}/{self.year}.csv', 'w' if first else 'a', newline='', encoding='utf-8') as f:
            csv_writer = csv.writer(f)
            # Header row only for the first batch.
            if first:
                csv_writer.writerow(sheet.row_values(0))

            # Collect DB rows while mirroring every data row to the CSV.
            rows = []
            for row in range(1, sheet.nrows):
                values = sheet.row_values(row)
                csv_writer.writerow(values)
                # The last spreadsheet column is intentionally not stored --
                # presumably it has no matching column in document_<year>;
                # TODO confirm against the table schema.
                rows.append([str(field) for field in values[:-1]])

            # An empty export (header only) would otherwise build an invalid
            # "INSERT ... VALUES" statement with no tuples.
            if not rows:
                self.log_info('文献信息写入数据库成功')
                return

            # Parameterised bulk insert; only the (validated) table name is
            # interpolated, every value goes through a placeholder.
            placeholders = ','.join(['%s'] * len(rows[0]))
            with connection.cursor() as cursor:
                cursor.executemany(
                    f"INSERT INTO {self.db_table} VALUES (DEFAULT,{placeholders})",
                    rows,
                )
            self.log_info('文献信息写入数据库成功')

            connection.commit()

    # Build the (shared) browser instance.
    def get_browser(self, headless=True, disable_gpu=False):
        """Create a Microsoft Edge webdriver.

        :param headless: run without a visible browser window
        :param disable_gpu: pass --disable-gpu to the browser
        :return: a configured selenium Edge webdriver
        """
        self.log_info('正在准备模拟环境...')
        edge_options = Options()

        # Hide the browser window unless explicitly requested.
        if headless:
            edge_options.add_argument("--headless")

        # Optionally disable GPU acceleration.
        if disable_gpu:
            edge_options.add_argument("--disable-gpu")

        # Path of the Edge executable (from wos_config).
        edge_options.binary_location = edge_path
        return webdriver.Edge(options=edge_options)

    # Pre-flight validation before a crawl.
    def before_download(self):
        """Validate journal/year and mark the thread as running.

        :raises Exception: on missing, oversized or out-of-range parameters.
        """
        fatal_msg = '参数非法'

        # Missing parameters.
        if self.journal is None or self.year is None:
            self.log_fatal(fatal_msg)
            raise Exception("请求参数存在空值")

        # Journal name limited to 256 characters.
        if len(self.journal) > 256:
            self.log_fatal(fatal_msg)
            raise Exception("期刊名长度非法")

        # Year must be 1900..current year (Y10K hazard noted in the original).
        year = int(self.year)
        if year < 1900 or year > int(time.strftime('%Y', time.localtime())):
            self.log_fatal(fatal_msg)
            raise Exception("年份非法")

        # Flip to "running" only after validation passes: the original set it
        # first, so a failed validation left the thread stuck at status 1
        # (run() performs no cleanup when before_download raises).
        self.status = 1

    # Post-crawl cleanup.
    def after_download(self):
        """Flush logs, persist the latest result count and release resources."""
        self.status = 0

        cursor = connection.cursor()
        # Persist the query total so a failed run does not leave the agent
        # record inconsistent (total stays -1 when the search itself failed).
        cursor.execute(
            "UPDATE agent_record SET last_query_num=%s WHERE agent_id=%s and year=%s",
            [self.total, self.agent_id, self.year],
        )

        # Flush any remaining log rows.
        self.write_logs(cursor)

        # The browser may never have been created if startup failed; the
        # original called quit() unconditionally and crashed on None.
        if self.browser is not None:
            self.browser.quit()

        # Commit and release.
        connection.commit()
        cursor.close()

    # Build the journal + publication-year search string.
    def get_query(self):
        """Return the advanced-search query: SO=(journal) AND PY=(year)."""
        return self.add_to_query(self.add_to_query('',
                                                   'AND',
                                                   'SO',
                                                   self.journal),
                                 'AND',
                                 'PY',
                                 self.year)

    # Append one field clause to a query string.
    def add_to_query(self, query: str, boolean: str, field: str, keyword: str) -> str:
        """Append ``<field>=(<keyword>)`` to *query* with the given boolean op."""
        if query is None or query.strip() == '':
            return f'{field}=({keyword})'
        return f'({query}){boolean} {field}=({keyword})'

    def download_delay(self, delay: int = 1):
        """Sleep between batches to avoid rate limiting / IP bans."""
        self.log_info(f'下载延迟 {delay} 秒')
        time.sleep(delay)

    def log_info(self, info: str):
        self.add_log(log_states['INFO'], info)

    def log_warn(self, info: str):
        self.add_log(log_states['WARN'], info)

    def log_error(self, info: str):
        self.add_log(log_states['ERROR'], info)

    def log_fatal(self, info: str):
        self.add_log(log_states['FATAL'], info)

    # Queue a log row and echo it to stdout.
    def add_log(self, status, info):
        """Buffer one crawl_log row (flushed by write_logs) and print it."""
        # Stored timestamp is in microseconds -- assumed to match the
        # crawl_log schema; TODO confirm.
        timestamp = int(time.time_ns() / 1e3)

        # The crawl_log info column is limited to 512 characters.
        if len(info) > 512:
            info = info[:509] + '...'

        # Rows are kept as parameter tuples; no manual quote-escaping needed.
        self.log_sqls.append((status, info, self.agent_record_id, timestamp))
        # fromtimestamp expects SECONDS -- the original passed microseconds,
        # which overflows the platform range; convert before formatting.
        print(f"{datetime.fromtimestamp(timestamp / 1e6).strftime('%Y-%m-%d %H:%M:%S')} [{status}] {info}")

    def write_logs(self, cursor):
        """Flush all queued log rows using *cursor* (the caller commits)."""
        # Nothing queued.
        if not self.log_sqls:
            return

        # Parameterised bulk insert (the original concatenated pre-escaped
        # value tuples into the SQL string, which was injection-prone and
        # double-escaped quotes).
        cursor.executemany(
            "INSERT INTO crawl_log VALUES (DEFAULT,%s,%s,%s,%s)",
            self.log_sqls,
        )

        # Reset the queue for the next batch.
        self.log_sqls.clear()

    def cursor_execute(self, sql):
        # NOTE(review): the cursor is closed when the with-block exits, so the
        # returned object is only useful for attributes such as rowcount --
        # it cannot fetch rows.  Kept as-is for interface compatibility.
        with connection.cursor() as cursor:
            cursor.execute(sql)
            return cursor
