# coding=utf-8
import logging

from html_crawler import HtmlCrawler
from html_parser import HtmlParser
from url_manager import UrlManager
from data_saver import DataSaver

from tools import *


class CrawlerMain:
    """Drives the crawl: fetches wordbook pages, parses the word lists,
    downloads each word's detail page, and hands the results to DataSaver."""

    def __init__(self):
        self.html_crawler = HtmlCrawler()
        self.html_parser = HtmlParser()
        self.url_manager = UrlManager()
        self.data_saver = DataSaver()

    def crawler(self, page_url, word_url, level):
        """Crawl every word list reachable from a wordbook index page.

        :param page_url: URL of the wordbook index page to start from.
        :param word_url: base URL for per-word detail pages; the word id
            is appended to it (``word_url + str(id) + '/'``).
        :param level: tag passed through to ``DataSaver.save_list`` —
            presumably a difficulty level; confirm against DataSaver.
        """
        # Collect the word-list URLs from the wordbook index page.
        html_content = self.html_crawler.download_html(page_url)
        wordbook_urls = self.html_parser.parse(html_content, page_url, 'wordbook')
        self.url_manager.add_new_urls(wordbook_urls)

        while self.url_manager.has_new_url():
            get_url = self.url_manager.get_new_url()
            index = 1
            while True:
                try:
                    words_content = []
                    html_content = self.html_crawler.download_html(get_url, params={'page': index})
                    word_lists = self.html_parser.parse(html_content, get_url, 'wordlist')

                    # Past the last page the parser yields nothing — done
                    # with this word list.
                    if word_lists is None:
                        break

                    for word in word_lists:
                        word_main = self.html_crawler.get_word_main(word)
                        word_main_content = self.html_crawler.download_html(
                            word_url + str(word_main['data']['id']) + '/'
                        )
                        word_annotation = self.html_parser.get_word_annotation(word_main_content)
                        cn_definition, en_definition = get_word_definition(word_main['data']['definitions'])
                        words_content.append({
                            'word': word,
                            'cn_mean': cn_definition.strip(),
                            'en_mean': en_definition.strip(),
                            'annotation': word_annotation
                        })

                    # Persist each page as soon as it is complete so a later
                    # failure does not lose already-crawled pages.
                    self.data_saver.save_list(words_content, level)
                    index += 1
                except Exception:
                    # Best-effort crawl: record the failure and move on to the
                    # next word list. The original caught BaseException and
                    # passed silently, which also swallowed KeyboardInterrupt/
                    # SystemExit and made failures undiagnosable.
                    logging.exception('failed on page %s of %s', index, get_url)
                    break

if __name__ == '__main__':
    # Base URL for individual word detail pages (word id gets appended).
    word_url = 'https://www.shanbay.com/bdc/vocabulary/'

    # Known wordbook index pages; only the IELTS book is crawled below.
    cet4_url = 'https://www.shanbay.com/wordbook/1/'
    cet6_url = 'https://www.shanbay.com/wordbook/2/'
    ielts_url = 'https://www.shanbay.com/wordbook/7/'

    # Crawl the IELTS wordbook at level 3.
    CrawlerMain().crawler(ielts_url, word_url, 3)