# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy import Request
import json
import time



class Tb2Spider(scrapy.Spider):
    """Spider that performs a simulated Taobao login, then crawls search
    result pages for a keyword and follows each item's detail page.

    Flow: start_requests -> post_login -> search_token -> parse_crawl_url
          -> page -> parse
    """

    # Spider name (used with `scrapy crawl tb2`).
    name = 'tb2'
    # Domains the spider is allowed to crawl.
    allowed_domains = ['taobao.com']
    # URL used for the simulated login.
    login_url = 'https://login.taobao.com/member/login.jhtml'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'Keep-Alive'
    }

    # SECURITY NOTE(review): the username/password below are hard-coded.
    # They should be moved to Scrapy settings or environment variables
    # before this code is shared or committed.
    # Taobao username (or shop name) submitted with the login form.
    username = '翔翼风'
    # Form data for the simulated login POST.  The field values (tokens,
    # fingerprints, redirect URLs) were captured from a real login request.
    post_data = {
        'TPL_username': username,
        'TPL_password': '15334183661lm',
        'ncoSig': '',
        'ncoSessionid': '',
        'ncoToken': 'd801d3c69349',
        'slideCodeShow': 'false',
        'useMobile': 'false',
        'lang': 'zh_CN',
        'loginsite': '0',
        'newlogin': '0',
        'TPL_redirect_url': 'http://buyertrade.taobao.com/trade/itemlist/list_bought_items.htm?spm=a1z02.1.a2109.d1000368.5d3a782dr6KjrH&nekot=1470211439694',
        'from': 'tb',
        'fc': 'default',
        'style': 'default',
        'css_style': '',
        'keyLogin': 'false',
        'qrLogin': 'true',
        'newMini': 'false',
        'newMini2': 'false',
        'tid': '',
        'loginType': '3',
        'minititle': '',
        'minipara': '',
        'pstrong': '',
        'sign': '',
        'need_sign': '',
        'isIgnore': '',
        'full_redirect': '',
        'sub_jump': '',
        'popid': '',
        'callback': '',
        'guf': '',
        'not_duplite_str': '',
        'need_user_id': '',
        'poy': '',
        'gvfdcname': '10',
        'gvfdcre': '68747470733A2F2F6C6F67696E2E74616F62616F2E636F6D2F6D656D6265722F6C6F676F75742E6A68746D6C3F73706D3D61317A30392E322E37353438',
        'from_encoding': '',
        'sub': '',
        'TPL_password_2': '9b8f47092a216b0df4f68ee751c65ba430627e81b09029f29be8d6d1e24b62b8338222b95e759f9877f0051e096ae285181621f1',
        'loginASR': '1',
        'loginASRSuc': '1',
        'allp': '',
        'oslanguage': 'zh-CN',
        'sr': '1920*1080',
        'osVer': 'windows|6.1',
        'naviVer': 'chrome|67.0339687',
        'osACN': 'Mozilla',
        'osAV': '5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
        'osPF': 'Win32',
        'miserHardInfo': '',
        'appkey': '00000000',
        'nickLoginLink': '',
        'mobileLoginLink': 'https://login.taobao.com/member/login.jhtml?redirectURL=http://buyertrade.taobao.com/trade/itemlist/list_bought_items.htm?spm=a1z02.1.a2109.d1000368.5d3a782dr6KjrH&nekot=1470211439694&useMobile=true',
        'showAssistantLink': '',
        'um_token': 'HV02PAAZ0bb0767c3af',
        'ua': 'rQcXNgSTHZhdvYEp94Q9LWm3tf/rWXTklo5KCcpiO9WwblFoikTWZTfZQ7wfTsnnTb8z6gm8TsTTJ7ZyUxxBEdKEiqnZosTTb8r26zmTnsZwpPVsSHbFSbBM/qwfzTTBrT5S6K+aTjsnj6UfP/T2Tj+teh9f8plTIb826zmgsjTQTVj1vhovBOLEukvAHyptk38gOP4Tth2VR3CpC+jJ+IPZXx71zeO8I8'

    }

    def re_html(self, data):
        """Strip HTML tags, newlines and ideographic spaces from *data*.

        Returns the cleaned string, or None when cleaning fails (kept as a
        best-effort helper, matching the original silent-failure contract).
        """
        try:
            message = str(data)
            # Raw string avoids an invalid-escape warning for \w on
            # modern Python; the pattern itself is unchanged.
            no_tags = re.sub(r'</?\w+[^>]*>', '', message)
            # Drop newlines and full-width (U+3000) spaces in one pass.
            return re.sub(r'[\n\u3000]', '', no_tags)
        except Exception:
            # Deliberate best-effort: swallow cleaning errors (but never
            # SystemExit/KeyboardInterrupt, unlike the old bare except).
            return None

    def start_requests(self):
        """Fetch the login page so the login form can be submitted."""
        yield scrapy.Request(
            url=self.login_url,
            # Use a dedicated cookie jar so the session survives redirects.
            meta={'cookiejar': 1},
            headers=self.headers,
            callback=self.post_login
        )

    def post_login(self, response):
        """Submit the captured login form data against the login page."""
        yield scrapy.FormRequest.from_response(
            response=response,
            method='POST',
            meta={'cookiejar': response.meta['cookiejar']},
            formdata=self.post_data,
            callback=self.search_token,
            # Login URL was already visited; bypass the dupe filter.
            dont_filter=True
        )

    def search_token(self, response):
        """After login, pull the 'nekot' token out of the response body and
        request the ST-apply URL to skip secondary verification.

        Raises AttributeError if no token is present (login failed) — same
        failure mode as the original code.
        """
        body = str(response.body)

        # The token appears as "&nekot=<value>'}" inside the (stringified)
        # response body; peel the wrapper off step by step.
        raw = re.search(r'&nekot=.*?}', body).group()
        token = re.sub(r'&', '', raw)
        token = re.sub(r'nekot=', '', token)
        token = re.sub(r"'}", '', token)
        # str(bytes) leaves escape backslashes behind; remove them.
        token = re.sub(r'\\', '', token)

        token_url = 'https://passport.alibaba.com/mini_apply_st.js?site=0&token=%s&callback=stCallback6' % token

        yield scrapy.Request(
            url=token_url,
            meta={'cookiejar': response.meta['cookiejar']},
            headers=self.headers,
            callback=self.parse_crawl_url
        )

    def parse_crawl_url(self, response):
        """Logged in: start crawling the search-result pages."""
        print("登陆淘宝成功！")

        # Search keyword to crawl.
        key = '工装裤'
        # Only crawl the first 2 pages; Taobao pages step by 44 items.
        for i in range(2):
            page_url = 'https://s.taobao.com/search?q=' + key + '&s=' + str(i * 44)
            yield Request(
                meta={'cookiejar': response.meta['cookiejar']},
                url=page_url,
                headers=self.headers,
                callback=self.page
            )

    def page(self, response):
        """Parse one search-result page and follow every item's detail URL.

        NOTE: Tmall and Taobao items live on different hosts, so the detail
        URL is built differently depending on the seller badge.
        """
        print("成功得到页面！！")

        # Step 1: grab the per-item <div> containers.
        div_list = response.xpath('//div[@class="item J_MouserOnverReq item-ad  "]')

        for div in div_list:
            # `nid` (renamed from `id` to avoid shadowing the builtin).
            nid = div.xpath('.//div[@class="row row-2 title"]/a/@data-nid').extract_first()
            # "Gold seller" badge class, e.g. "icon-service-jinpaimaijia";
            # extract_first() yields the None *object* when absent.
            judgement = div.xpath('.//li[@class="icon J_IconPopup J_MouseEneterLeave"]/a/span/@class').extract_first()
            # FIX: the original compared against the string "None", which
            # extract_first() never returns, so the Tmall branch was dead.
            if judgement is None:
                # No badge -> Tmall shop.
                good_url = "https://detail.tmall.com/item.htm?&id=" + nid
            else:
                # Gold seller -> regular Taobao shop.
                good_url = "https://item.taobao.com/item.htm?id=" + nid
            print(good_url)
            yield Request(url=good_url,
                          meta={'cookiejar': response.meta['cookiejar']},
                          headers=self.headers,
                          callback=self.parse,
                          dont_filter=True
                          )

    def parse(self, response):
        """Item detail-page callback — extraction not yet implemented."""
        print("得到真正的页面了！！")
