# coding=utf8
import sys
import re
import os
import copy
import time
from lxml import etree
from scrapy_car.spiders.base_spider import SiMaRedisSpider
from scrapy_car._items import BbsData, BbsTask
from scrapy import Request


class Xitek(SiMaRedisSpider):
    """Spider for list pages on auto.xitek.com.

    Crawls the paginated list pages of the configured forum sections
    ('model' / 'brand'), yields ``BbsTask`` items for follow-up pages,
    and dispatches to a per-section extractor (``_extract_<domain>_list``).
    """

    name = "xitek"
    custom_settings = {
        # 'DOWNLOAD_DELAY': 1,
        # 'CONCURRENT_REQUEST': 1,
        'ITEM_PIPELINES': {
            'scrapy_car._pipelines.BbsPipeLines': 300,
        }
    }

    # Base URL of the desktop (PC) site; list-page URLs are built from it.
    pc_base_url = 'http://auto.xitek.com'

    # Section name -> numeric section id used in list-page URL paths.
    domain = {
        'model': 477,
        'brand': 480,
    }

    # Seed tasks are only registered outside the development environment.
    if 'development' != os.getenv('RUNTIME_ENV'):
        start_urls = [
            # {"method": 'get_index'},
            {'method': 'get_list', 'domain': 'model'},
        ]

    def start_requests(self):
        """Turn each seed-task dict in ``start_urls`` into a Request."""
        for data in self.start_urls:
            yield self.make_requests_from_url(data)

    def get_list(self, data):
        """Build the Request for the list page described by *data*.

        When *data* carries no explicit ``'url'``, page 1 of the section
        (``data['domain']``) is requested.
        """
        if 'url' not in data:
            url = '{}/{}/{}-1.html'.format(
                self.pc_base_url, data['domain'], self.domain[data['domain']]
            )
        else:
            url = data['url']
        meta = {'task': data}
        return Request(url, self.parse_list, headers=self.headers(),
                       errback=self.bba_error_back, meta=meta)

    def _extract_next_page(self, response, html_xpath, next_page=False):
        """Return follow-up list-page URLs found in the pager.

        With ``next_page=False`` (default), the total page count is read
        from the last pager link and URLs for pages 2..N are generated.
        With ``next_page=True``, only the single '下一页' (next page) link
        is followed, if present.
        """
        task = response.meta['task']
        page_list = []
        page_xpath = html_xpath.xpath('//div[@class="yi"]/a')
        if next_page is False:
            if page_xpath:
                page_total = page_xpath[-1].get('href')
                # BUGFIX: raw string + escaped '.' so the dot cannot match
                # an arbitrary character, and guard against a missing match
                # instead of raising IndexError on findall(...)[0].
                match = re.search(r'-([0-9]+)\.htm', page_total or '')
                if match:
                    for pn in range(2, int(match.group(1)) + 1):
                        url = '{}/{}/{}-{}.html'.format(
                            self.pc_base_url, task['domain'],
                            self.domain[task['domain']], pn
                        )
                        page_list.append(url)
        else:
            # BUGFIX: guard the [-2] index — an empty or one-element pager
            # previously raised IndexError here.
            if len(page_xpath) >= 2 and page_xpath[-2].text == '下一页':
                url = '{}/{}/{}'.format(
                    self.pc_base_url, task['domain'], page_xpath[-2].get('href')
                )
                page_list.append(url)
        return page_list

    def _extract_model_list(self, response, html_xpath):
        """Extract entries from a 'model' section list page.

        NOTE(review): placeholder — returns a dummy entry; the real
        extraction logic appears to be unfinished.
        """
        return ['test']

    def parse_list(self, response):
        """Parse one list page.

        On page 1 only (so pagination is scheduled exactly once), yields a
        ``BbsTask`` for every remaining page, then dispatches the page to
        the section-specific extractor named ``_extract_<domain>_list``.

        Raises:
            ValueError: if no extractor exists for the task's domain.
        """
        task = response.meta['task']
        try:
            html_xpath = etree.HTML(response.text)
            page_one_xpath = html_xpath.xpath('//div[@class="yi"]/strong[3]/text()')
            if page_one_xpath and int(page_one_xpath[0]) == 1:
                for url in self._extract_next_page(response, html_xpath):
                    # Shallow copy so every pagination task gets its own 'url'.
                    next_task = copy.copy(task)
                    next_task['url'] = url
                    yield BbsTask(next_task)

            parse_fun_name = '_extract_{}_list'.format(task['domain'])
            parse_fun = getattr(self, parse_fun_name, None)
            if parse_fun is None:
                # Keep the function *name* in the message (not the bound
                # method) so the error stays readable.
                raise ValueError("Don't have {} func".format(parse_fun_name))
            for bbs_task in parse_fun(response, html_xpath):
                # NOTE(review): results are only printed — persisting them
                # as items looks like unfinished work; confirm intent.
                print(bbs_task)

        except Exception as e:
            # Record the failure as an error task, then re-raise so Scrapy
            # still logs the exception and updates its stats.
            yield self.parse_error(e, task, BbsTask)
            raise
