# -*- coding: utf-8 -*-
import logging
import re
from HTMLParser import HTMLParser

import time
from scrapy import Spider, Request, FormRequest, Selector
from taobao_category.items import ShopCategoryItem, ShopMainCategory
from taobao_category.settings_local import taobao_info, taobao_account, SHOP_CUSTOMER, TMALL_SHOP_KEY
from taobao_category.spiders.spider_utils import parse_detail_item_byxpath
from taobao_category.sqlalchemy_engine import DBSession
from taobao_category.utils.global_manager import get_value
from taobao_category.utils.utils import parse_url

logger = logging.getLogger(__name__)

class TaobaoSpider(Spider):
    """Log into Taobao and crawl shop category information.

    Login flow (Taobao's multi-step SSO):
        start_requests -> post_login -> search_token -> search_st
        -> login_process -> start_crawl (one request per shop id)

    From each shop's customer page the spider follows either the Tmall
    or the Taobao (C-shop) branch and emits ``ShopCategoryItem`` /
    ``ShopMainCategory`` items.
    """

    name = "TaobaoSpider"
    # allowed_domains = taobao_info['allowed_domains']

    # Sample shop ids: 68412061 (B-type shop), 62656165,
    # 57301059, 57054001
    shop_id_list = []

    def __init__(self, name=None, **kwargs):
        # FIX: the original called super(Spider, self).__init__, which
        # resolves to the class *after* Spider in the MRO and therefore
        # skips Spider.__init__ entirely; the subclass must be named here.
        super(TaobaoSpider, self).__init__(name, **kwargs)
        self.session = DBSession()
        # Seconds to sleep between requests (crude anti-ban throttling).
        self.time = 2
        self.shop_id_list = get_value('shop_id_list', [])

    def start_requests(self):
        """Kick off the crawl by fetching the login page."""
        return [Request(
            taobao_info['login_url'],
            meta={'cookiejar': 1, 'is_dynamic_page': 1},
            callback=self.post_login,
        )]

    def post_login(self, response):
        """Fill the login form's pre-declared fields and submit it.

        Only inputs whose names already exist as keys in the ``post``
        template are copied from the page; credentials from
        ``taobao_account`` are merged in last.
        """
        post = taobao_info['post']
        form_inputs = response.xpath('//form[@id="J_Form"]//input')
        # FIX: loop variable renamed from the builtin `input`.
        for form_input in form_inputs:
            name = form_input.xpath('@name').extract()
            value = form_input.xpath('@value').extract()
            if name and post.get(name[0], None) is not None:
                post.update({
                    name[0]: value[0] if value else ''
                })
        post.update(taobao_account)
        return [FormRequest.from_response(
            response,
            method='POST',
            meta={'cookiejar': response.meta['cookiejar']},
            headers=taobao_info['login_headers'],
            formdata=post,
            callback=self.search_token,
            dont_filter=True,
        )]

    def search_token(self, response):
        """After the form post, extract the SSO token from the response body."""
        token_pattern = re.compile(
            r'https://passport\.alibaba\.com/mini_apply_st\.js\?site=0&token=(.*?)&')
        token_match = token_pattern.search(response.body)
        if token_match:
            # FIX: logger.log(1, ...) used numeric level 1, which is below
            # DEBUG and silently discarded by default handlers.
            logger.debug("token found")
            token = token_match.group(1)
            token_url = ('https://passport.alibaba.com/mini_apply_st.js'
                         '?site=0&token=%s&callback=stCallback6' % token)
            return [Request(
                token_url,
                meta={'cookiejar': response.meta['cookiejar']},
                callback=self.search_st,
            )]
        else:
            logger.warning("login failed[on search_token]")

    def search_st(self, response):
        """Extract the st code returned for the SSO token."""
        pattern = re.compile('{"st":"(.*?)"}', re.S)
        result = pattern.search(response.body)
        if result:
            logger.debug("st code found")  # see level fix in search_token
            st = result.group(1)
            st_url = ('https://login.taobao.com/member/vst.htm?st=%s&TPL_username=%s'
                      % (st, taobao_account['TPL_username']))
            return [Request(
                st_url,
                method="GET",
                meta={'cookiejar': response.meta['cookiejar']},
                callback=self.login_process,
            )]
        else:
            logger.warning("login failed[on search_st]")

    def login_process(self, response):
        """Confirm login via the st code, then queue one request per shop id."""
        pattern = re.compile('top.location.href = "(.*?)"', re.S)
        match = pattern.search(response.body)
        if match:
            logger.info('login success')
            for shop_id in self.shop_id_list:
                time.sleep(self.time)  # throttle between shops
                url = SHOP_CUSTOMER % shop_id
                yield Request(
                    url,
                    method='GET',
                    meta={'cookiejar': response.meta['cookiejar'],
                          'shop_id': shop_id},
                    dont_filter=True,
                    callback=self.start_crawl,
                )
        else:
            # FIX: the original logged "[on search_st]" here, which made
            # this failure indistinguishable from the previous step's.
            logger.warning("login failed[on login_process]")

    def start_crawl(self, response):
        """Dispatch the shop customer page to the Tmall or C-shop branch.

        :param response: shop customer page (meta carries ``shop_id``)
        """
        if response.status == 999:
            # Taobao answers 999 when it rejects the request (anti-spider).
            return
        if TMALL_SHOP_KEY in str(response.url):
            # --- Tmall shop ---
            # The shopkeeper link lives inside HTML-escaped markup.
            extra_info = response.xpath("//div[contains(@class, 'extra-info')]").extract()
            # FIX: guard against a missing extra-info block instead of
            # raising IndexError on extra_info[0].
            if extra_info:
                html_parser = HTMLParser()
                unescaped = html_parser.unescape(extra_info[0])
                url = Selector(text=unescaped).xpath('//li[contains(@class, "shopkeeper")]/div[contains(@class, "right")]/a/@href').extract()
                if url:
                    time.sleep(self.time)
                    # NOTE(review): shop-info crawl deliberately disabled.
                    # yield Request('https:' + url[0], meta={'cookiejar': response.meta['cookiejar'], 'shop_id': response.request.meta['shop_id'], 'shop_type': 'c', 'is_dynamic_page': 1}, callback=self.parse_shop_info)
            items = response.xpath('//div[contains(@class, "J_TItems")]//dl[contains(@class, "item")]')
            temp = parse_detail_item_byxpath(response, items)
            if temp:
                time.sleep(self.time)
                if temp['href']:
                    yield Request(
                        'https:' + temp['href'],
                        meta={'cookiejar': response.meta['cookiejar'],
                              'shop_id': response.request.meta['shop_id'],
                              'is_dynamic_page': 1},
                        callback=self.tmall_detail_parse,
                    )
        else:
            # --- Taobao C shop ---
            saller_a_url_list = response.xpath("//span[contains(@class, 'shop-rank')]/a/@href").extract()
            if saller_a_url_list:
                time.sleep(self.time)
                # NOTE(review): shop-info crawl deliberately disabled.
                # yield Request('https:' + saller_a_url_list[0], meta={'cookiejar' : response.meta['cookiejar'], 'shop_id': response.request.meta['shop_id'], 'shop_type': 'c', 'is_dynamic_page': 1}, callback=self.parse_shop_info)
            # The item list is served through an async URL declared in a
            # hidden input on the page.
            data_ajax_url = response.xpath("//input[@id='J_ShopAsynSearchURL']/@value").extract()
            if data_ajax_url:
                # Rebuild an absolute URL from the page's scheme and host.
                protocol, host, _ = parse_url(response.url)
                uri = protocol + '://' + host + data_ajax_url[0]
                time.sleep(self.time)
                yield Request(
                    uri,
                    meta={'cookiejar': response.meta['cookiejar'],
                          'shop_id': response.request.meta['shop_id']},
                    callback=self.taobao_ajax_parse,
                )

    def taobao_ajax_parse(self, response):
        """Parse the async item-list payload of a Taobao C shop.

        The payload is a JS-escaped HTML fragment, so it is unescaped
        (Python 2 'string_escape' codec) before being fed to a Selector.
        """
        main_info = response.body.decode(response.encoding).encode('utf-8').decode('string_escape')
        items = Selector(text=main_info).xpath('//dl')
        temp = parse_detail_item_byxpath(response, items)
        # FIX: removed an unused ShopCategoryItem() instantiation that was
        # never yielded.
        if temp:
            if temp['href']:
                yield Request(
                    'https:' + temp['href'],
                    meta={'cookiejar': response.meta['cookiejar'],
                          'shop_id': response.request.meta['shop_id']},
                    callback=self.taobao_detail_parse,
                )

    def parse_shop_info(self, response):
        """Parse a Tmall/Taobao shop page and emit its main category.

        Reads ``shop_id`` and ``shop_type`` from request meta; yields a
        ``ShopMainCategory`` only when a main category was found.
        """
        item_obj = ShopMainCategory()
        item_obj['shop_id'] = response.request.meta['shop_id']
        item_obj['type'] = response.request.meta['shop_type']
        main_category = response.xpath('//div[@id="shop-rate-box"]/div[contains(@class, "personal-info")]/div[contains(@class, "col-sub")]/div[1]/div[contains(@class, "bd")]/div[1]/ul/li[1]/a/text()').extract()
        main_category = main_category[0].encode('utf-8') if main_category else ''
        item_obj['main_category'] = main_category
        if main_category:
            yield item_obj

    def tmall_detail_parse(self, response):
        """Parse a Tmall item detail page and emit its root category id."""
        cat_id = response.xpath('//input[@name="rootCatId"]/@value').extract()
        cat_id = cat_id[0].encode('utf-8') if cat_id else ''
        if cat_id:
            item_obj = ShopCategoryItem()
            item_obj['shop_id'] = response.request.meta['shop_id']
            item_obj['cat_id'] = cat_id
            yield item_obj

    def taobao_detail_parse(self, response):
        """Parse a Taobao C-shop item detail page and emit its category id."""
        cat_id = response.xpath('//div[@id="J_Pine"]/@data-catid').extract()
        cat_id = cat_id[0].encode('utf-8') if cat_id else ''
        if cat_id:
            item_obj = ShopCategoryItem()
            item_obj['shop_id'] = response.request.meta['shop_id']
            item_obj['cat_id'] = cat_id
            yield item_obj