#!/usr/bin/python

from bs4 import BeautifulSoup

import os
import time

from util.csv import CSVFile
from util.dct import safe_save
from util.frequency import FreqControl
from util.log import logger
from util.requests import fetch_page
from util.settings import DATA_DIR


class WordQuery:
    """Query iciba.com for English word inflections and build reverse maps.

    For every word in ``words`` a page is fetched (via the ``freq_control``
    scheduler) and the "change" section is parsed with BeautifulSoup.  Each
    recognized inflection type (past tense, past participle, present
    participle, third-person singular, plural) is stored as a
    ``variant -> base word`` dict; unrecognized types go to
    ``unknown_var`` / ``unknown_type``.  Words whose fetch or parse failed
    stay in ``self.retry`` so they can be queried again later, and all
    collected maps are dumped to timestamped CSV files.
    """

    name = 'word_query'
    # Browser-like request headers (including a captured session cookie) so
    # iciba.com serves the regular desktop HTML page.
    headers = {
        'Host': 'www.iciba.com',
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36',
        'Upgrade-Insecure-Requests': '1',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh-TW;q=0.7,zh;q=0.6',
        'Cookie': 'iciba_u_rand=83825ca0871012ceaa2f6307ca9556df%40219.239.227.245; iciba_u_rand_t=1528682727; '
                  'is_new_index=1; UM_distinctid=163ec979eed26b-0243c3922e7627-393d5f0e-140000-163ec979eee4d4; '
                  'cbdownload_time=download; CNZZDATA1257391275=583500471-1528678984-%7C1528781956; '
                  'screen-skin=screen-blue; cbdownload_num=18; search-history=show%2Cshows%2Cshowing; '
                  'CNZZDATA1256556802=1565248512-1528682737-http%253A%252F%252Fwww.iciba.com%252F%7C1528787258; '
                  'c_word_history=show%2Cshows%2Cshowing%2Chowever%2Cdo%2Csalad%2Csand%2Csave%2Cshock%2Cshare '
    }

    def __init__(self, words, freq_control):
        """Set up result containers for a batch of words.

        :param words: dict whose keys are the words to query (values are
            carried along only for the retry dump in ``save_to_file``).
        :param freq_control: scheduler exposing ``run_jobs`` /
            ``wait_finish`` / ``get_result`` / ``show_progress``.
        """
        self.words = words
        # Words still awaiting a successful query; shrunk by the success /
        # empty callbacks, grown again by fetch failures.
        self.retry = set(self.words.keys())
        # variant -> base-word maps, one per inflection type.
        self.past_tense = {}
        self.past_participle = {}
        self.present_participle = {}
        self.singular = {}
        self.plural = {}
        self.unknown_var = {}
        # variant -> unrecognized type label (parallel to unknown_var).
        self.unknown_type = {}
        self.scheduler = freq_control
        self.success_count = 0
        self.fail_count = 0

    def __fetch_word(self, *args, **kwargs):
        """Scheduler job entry point; expects ``kwargs['args']['word']``.

        NOTE(review): the exact kwargs shape is dictated by the scheduler's
        ``run_jobs`` contract — confirm against FreqControl before changing.
        """
        if len(kwargs) < 1:
            raise Exception("没有传入待查询的单词")
        word = kwargs['args']['word']
        return self.fetch_word(word)

    def __end_one_success_query(self, job_id, word, var_count):
        """Record one query that yielded ``var_count`` variants."""
        self.success_count += 1
        self.retry.discard(word)  # no-op if the word was already removed
        self.scheduler.show_progress(job_id, '单词: {} ! 单词变体数量: {} !'.format(word, var_count), auto_new_line=False)

    def __end_one_fail_query(self, job_id, word):
        """Record one failed query; the word stays in ``self.retry``."""
        self.fail_count += 1
        self.scheduler.show_progress(job_id, '单词: {} ! 查询失败，记录单词等待以后重试 !'.format(word), auto_new_line=True)

    def __end_one_empty_query(self, job_id, word):
        """Record one successful query that found no variants at all."""
        self.success_count += 1
        self.retry.discard(word)
        self.scheduler.show_progress(job_id, '单词: {} ! 没有变体 !'.format(word), auto_new_line=False)

    def __end_one_exception_query(self):
        """Record a query aborted by an unexpected parsing exception."""
        self.fail_count += 1

    def __extract_variables(self, obj):
        """Callback: parse one fetched page and file each variant.

        ``obj`` is the scheduler's opaque job handle; ``get_result`` yields
        ``{'id', 'word', 'success_status', 'html_text'}`` (see fetch_word).
        """
        word = None
        try:
            ret = self.scheduler.get_result(obj)
            job_id = ret['id']
            word = ret['word']
            if not ret['success_status']:
                self.__end_one_fail_query(job_id, word)
                return
            html_text = ret['html_text']
            # The inflection list lives in <li class="change clearfix">.
            variable_part = BeautifulSoup(html_text, 'lxml').find('li', class_='change clearfix')
            if variable_part is None:
                self.__end_one_empty_query(job_id, word)
                return
            variable_spans = variable_part.find_all('span')
            if variable_spans is None:
                self.__end_one_empty_query(job_id, word)
                return
            # Map the page's Chinese type label to the destination dict;
            # anything else is filed under unknown_var/unknown_type.
            known_types = {
                '过去式': self.past_tense,
                '过去分词': self.past_participle,
                '现在分词': self.present_participle,
                '第三人称单数': self.singular,
                '复数': self.plural,
            }
            var_count = 0
            for span in variable_spans:
                # Label precedes a fullwidth colon, e.g. "过去式：went".
                var_type = span.get_text().split('：')[0].strip()
                var_contents = [a.get_text().strip() for a in span.find_all('a')]
                var_count += len(var_contents)
                target = known_types.get(var_type)
                if target is not None:
                    for var_content in var_contents:
                        safe_save(target, var_content, word)
                else:
                    for var_content in var_contents:
                        safe_save(self.unknown_var, var_content, word)
                        safe_save(self.unknown_type, var_content, var_type)
            if var_count > 0:
                self.__end_one_success_query(job_id, word, var_count)
            else:
                self.__end_one_empty_query(job_id, word)
        except Exception:  # narrow from bare except: keep Ctrl-C working
            logger.exception("提取网页中单词{}变体信息过程中出现异常！记录单词等待以后重试：".format("" if word is None else " {} ".format(word)))
            self.__end_one_exception_query()

    def fetch_word(self, word):
        """Fetch the iciba page for ``word``.

        :return: dict with ``word``, ``success_status`` and ``html_text``
            ("" on failure); a failed word is re-added to ``self.retry``.
        """
        url = 'http://www.iciba.com/{}'.format(word)
        success_status, html_text = fetch_page(url=url, headers=WordQuery.headers)
        if success_status:
            return {'word': word, 'success_status': True, 'html_text': html_text}
        else:
            self.retry.add(word)
            logger.warning("单词 {} 的网页抓取失败".format(word))
            return {'word': word, 'success_status': False, 'html_text': ""}

    def query(self, query_all=True):
        """Run the whole batch (or only the retry set) and save results.

        :param query_all: True queries every word in ``self.words``;
            False re-queries only the words remembered in ``self.retry``.
        """
        try:
            if query_all:
                logger.info("开始查询全部 {} 个单词".format(len(self.words)))
                word_list = list(self.words.keys())
            else:
                logger.info("开始查询未能成功的 {} 个单词".format(len(self.retry)))
                # Snapshot: callbacks mutate self.retry while jobs run.
                word_list = list(self.retry)
            self.scheduler.run_jobs(method=self.__fetch_word, word=word_list, job_ids=word_list, callback=self.__extract_variables)
            self.scheduler.wait_finish()
            print("\n")
            logger.info("完成全部查询")
        except Exception:
            print("\n")
            logger.exception("查询过程中出现异常:")
        finally:
            # Always persist whatever was collected, even on failure.
            self.save_to_file()

    def save_to_file(self):
        """Dump every non-empty result map to a timestamped CSV file."""
        date_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.__save_one_file(self.past_tense, date_time, '过去式')
        self.__save_one_file(self.past_participle, date_time, '过去分词')
        self.__save_one_file(self.present_participle, date_time, '现在分词')
        self.__save_one_file(self.singular, date_time, '第三人称单数')
        self.__save_one_file(self.plural, date_time, '复数')
        self.__save_one_file(self.unknown_var, date_time, '未知变形单词')
        self.__save_one_file(self.unknown_type, date_time, '未知变形类型')
        if len(self.retry) > 0:
            retry_dct = {word: self.words[word] for word in self.retry}
            self.__save_one_file(retry_dct, date_time, '抓取失败')

    @staticmethod
    def __save_one_file(dct, date_time, name):
        """Write ``dct`` to DATA_DIR/<date_time>_<name>.csv (skip if empty)."""
        if len(dct) > 0:
            logger.info("有 {} 条{}记录待写入文件".format(len(dct), name))
            csv_file = CSVFile(os.path.join(DATA_DIR, '{}_{}.csv'.format(date_time, name)), 'utf8')
            csv_file.write_dict(dct, auto_close=True)

    @staticmethod
    def show_variables(word, html_text):
        """Log every variant found in ``html_text`` for debugging.

        Fixed: the original tested an undefined global ``result`` (NameError
        outside the __main__ script) and called ``find_all`` on a possibly
        missing node; now the parse result itself is checked.
        """
        variable_part = BeautifulSoup(html_text, 'lxml').find('li', class_='change clearfix')
        if variable_part is not None:
            variable_spans = variable_part.find_all('span')
            logger.info('原词:' + word)
            for span_index, span in enumerate(variable_spans):
                logger.info('  变体{}'.format(span_index + 1))
                logger.info('    变体类型 :' + span.get_text().split('：')[0].strip())
                all_var = span.find_all('a')
                for var_index, var in enumerate(all_var):
                    logger.info('    变体内容{}:{}'.format(var_index + 1, var.get_text().strip()))
        else:
            logger.info('网页中提取不到变体信息')


if __name__ == '__main__':
    # Manual smoke test: single thread, no inter-request throttling.
    fc = FreqControl(thread_count=1, min_sleep_time=0, max_sleep_time=0)
    # Fixed: WordQuery expects a dict (its __init__ calls .keys()); the
    # original passed the list ['show'], which raised AttributeError.
    wq = WordQuery({'show': 'show'}, fc)
    result = wq.fetch_word('abandon')
    if result['success_status']:
        wq.show_variables(result['word'], result['html_text'])
    # Failure is already logged by fetch_word, so nothing more to do here.
