# -*- coding: utf-8 -*-

import json
import logging
import os

import scrapy

from spider.settings import DATA_DIR

LOG = logging.getLogger(__name__)
BASE_DOMAIN = 'www.koolearn.com'
BASE_URL = 'https://%s' % BASE_DOMAIN


class MenuItem(scrapy.Item):
    """One (menu, submenu) entry scraped from the dictionary category sidebar."""

    menu = scrapy.Field()     # top-level menu title
    submenu = scrapy.Field()  # submenu link text
    url = scrapy.Field()      # submenu href (site-relative path)


class MenuSpider(scrapy.Spider):
    """Scrape the category menu from the koolearn dictionary index page.

    Yields one :class:`MenuItem` per (menu, submenu) pair found in the
    left sidebar of ``/dict/fenlei_4_0_1.html``.
    """

    name = 'koolearn-menu'
    allowed_domains = [BASE_DOMAIN]
    start_urls = [BASE_URL + '/dict/fenlei_4_0_1.html']
    custom_settings = {
        'ITEM_PIPELINES': {
            'spider.pipelines.JsonWriterPipeline': 300,
        },
    }

    def parse(self, response):
        """Walk the sidebar and emit a MenuItem for every submenu link."""
        # The first sidebar entry is skipped — presumably a header/"all"
        # entry in the site layout (TODO confirm against the live page).
        sidebar_items = response.css(
            'body > div.content-wrap > ul > li.left-sidebar-item')[1:]
        for sidebar_item in sidebar_items:
            menu_title = sidebar_item.css('div.left-sidebar-item-text')[0].root.text
            # The first anchor inside the "more" box is skipped as well.
            for link in sidebar_item.css('div.sider-more > a')[1:]:
                yield MenuItem(menu=menu_title,
                               submenu=link.root.text,
                               url=link.attrib['href'])


class VocabularyItem(MenuItem):
    """A vocabulary list entry: a MenuItem plus the vocabulary's display name."""

    name = scrapy.Field()  # vocabulary title text


class VocabularyListSpider(scrapy.Spider):
    """Scrape vocabulary listings for every menu found by :class:`MenuSpider`.

    Reads MenuSpider's JSON-lines output (one JSON object per line), fetches
    each menu's category page, and yields one :class:`VocabularyItem` per
    vocabulary listed there.
    """

    name = 'koolearn-voc-list'
    allowed_domains = [BASE_DOMAIN]
    start_urls = []
    custom_settings = {
        'ITEM_PIPELINES': {
            'spider.pipelines.JsonWriterPipeline': 300,
        },
    }
    # Path of the upstream spider's output; class-level attribute for
    # consistency with VocabularySpider.url_src.
    url_src = os.path.join(DATA_DIR, 'items-%s.txt' % MenuSpider.name)

    def start_requests(self):
        """Yield one request per menu entry recorded by MenuSpider."""
        LOG.debug(self.url_src)
        # Explicit UTF-8: the scraped data contains non-ASCII text, and the
        # platform-dependent default encoding can raise UnicodeDecodeError.
        with open(self.url_src, encoding='utf-8') as _file:
            for line in _file:
                menu = json.loads(line)
                # Carry the menu record along in meta so parse() can attach
                # it to every vocabulary item on that page.
                yield scrapy.Request(BASE_URL + menu['url'], meta=menu,
                                     dont_filter=True)

    def parse(self, response):
        """Yield a VocabularyItem for every vocabulary on the category page."""
        voc_ele_list = response.css(
            'body > div.content-wrap > div.left-content'
            ' > div.word-wrap > div.word-title')
        for voc_ele in voc_ele_list:
            name = voc_ele.root.text
            url = voc_ele.css('a.word-more').attrib['href']
            yield VocabularyItem(menu=response.meta['menu'],
                                 submenu=response.meta['submenu'],
                                 name=name,
                                 url=url)


class WordItem(VocabularyItem):
    """A single word entry: a VocabularyItem plus the word itself."""

    word = scrapy.Field()  # the word's display text


class VocabularySpider(scrapy.Spider):
    """Scrape every word of every vocabulary found by VocabularyListSpider.

    Reads the upstream spider's JSON-lines output, fetches each vocabulary's
    word pages (following "next" pagination links), and yields one
    ``item_cls`` item per word.  Subclasses (e.g. :class:`NameSpider`) only
    need to override ``url_src`` and ``item_cls``.
    """

    name = 'koolearn-voc'
    allowed_domains = [BASE_DOMAIN]
    start_urls = []
    custom_settings = {
        'ITEM_PIPELINES': {
            'spider.pipelines.JsonWriterPipeline': 300,
        },
    }
    url_src = os.path.join(DATA_DIR, 'items-%s.txt' % VocabularyListSpider.name)
    item_cls = WordItem
    # Bookkeeping keys Scrapy adds to request meta; they must not leak into
    # the emitted items.  Hoisted to a class constant so it is built once.
    _META_FIELDS = frozenset(
        ('download_timeout', 'download_slot', 'download_latency', 'depth'))

    def start_requests(self):
        """Yield one request per vocabulary recorded by the upstream spider."""
        LOG.debug(self.url_src)
        # Explicit UTF-8: the JSON lines contain non-ASCII text, and the
        # locale-dependent default encoding can raise UnicodeDecodeError.
        with open(self.url_src, encoding='utf-8') as _file:
            for line in _file:
                vocabulary = json.loads(line)
                # Pass the vocabulary record via meta so parse() can copy
                # its fields into every word item.
                yield scrapy.Request(BASE_URL + vocabulary['url'],
                                     meta=vocabulary, dont_filter=True)

    def parse(self, response):
        """Yield an item per word on this page, then follow pagination."""
        word_ele_list = response.css(
            'body > div.content-wrap > div.left-content'
            ' > div.word-wrap > div.word-box > a')
        for word_ele in word_ele_list:
            # Copy the vocabulary metadata minus Scrapy's bookkeeping keys,
            # then overlay the word-specific fields.
            data = {k: v for k, v in response.meta.items()
                    if k not in self._META_FIELDS}
            data['word'] = word_ele.root.text
            data['url'] = word_ele.attrib['href']
            yield self.item_cls(**data)

        # Follow the "next page" link when present and non-empty.
        next_page = response.css(
            'body > div.content-wrap > div.left-content > div.i-page > a.next')
        if next_page:
            url = next_page[0].attrib.get('href')
            if url:
                yield scrapy.Request(BASE_URL + url, meta=response.meta,
                                     dont_filter=True)


class NameListItem(scrapy.Item):
    """One name-list entry scraped from the English-name index page."""

    name = scrapy.Field()  # name-list title text
    url = scrapy.Field()   # href of the list's detail page


class NameListSpider(scrapy.Spider):
    """Scrape the index of English name lists from ``/dict/ename.html``.

    Yields one :class:`NameListItem` per list title found on the page.
    """

    name = 'koolearn-name-list'
    allowed_domains = [BASE_DOMAIN]
    start_urls = [BASE_URL + '/dict/ename.html']
    custom_settings = {
        'ITEM_PIPELINES': {
            'spider.pipelines.JsonWriterPipeline': 300,
        },
    }

    def parse(self, response):
        """Yield a NameListItem for every list title on the index page."""
        title_elements = response.css(
            'body > div.content-wrap > div.left-content > div > div.word-title')
        for title_ele in title_elements:
            yield NameListItem(name=title_ele.root.text,
                               url=title_ele.css('a.word-more').attrib['href'])


class NameItem(scrapy.Item):
    """A single English name, tagged with the name list it came from."""

    name = scrapy.Field()  # title of the source name list
    word = scrapy.Field()  # the name itself
    url = scrapy.Field()   # href of the name's detail page


class NameSpider(VocabularySpider):
    """VocabularySpider specialization that scrapes English names.

    Reuses the whole crawl loop from :class:`VocabularySpider`, pointing it
    at :class:`NameListSpider`'s output and emitting :class:`NameItem`s.
    """

    name = 'koolearn-name'
    url_src = os.path.join(DATA_DIR, 'items-%s.txt' % NameListSpider.name)
    item_cls = NameItem
