#!/usr/bin/env python3
# -*- coding: utf-8 -*-

'taobao_spider 2018/1/15 16:54 '

__author__ = 'lxy'


import json
import requests
import random
import re
import time
import threading
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


version = '0.0.1.0'  # script version string, printed at startup by the __main__ block
class TaoBao(threading.Thread):
    """Taobao search spider, implemented as a thread.

    When the current ``requests`` session gets blocked (the search page
    comes back without any <script> tags), a brand-new ``TaoBao`` thread
    with a fresh session/cookies is spawned to take over, and the current
    thread stops.
    """

    def __init__(self, k, s):
        """Store the search keyword and the shop-name prefix.

        :param k: search keyword
        :param s: shop-name prefix (first character of the shop name)
        """
        super(TaoBao, self).__init__()
        self.k = k
        # BUGFIX: the original stored the shop name in ``self.s`` and later
        # overwrote it with the requests session, then passed the *session*
        # as the shop name when spawning a replacement thread. Keep the two
        # in separate attributes.
        self.shop = s
        self.s = None        # requests session, created in func_init_s()
        self.req_num = 0     # number of requests issued by this thread
        self.req_url = None  # last requested URL, reused as the referer

    def run(self):
        """Thread entry point.

        BUGFIX: the original class never overrode ``run``, so the replacement
        thread started by ``func_req`` executed ``threading.Thread.run`` —
        a no-op — and did nothing at all.
        """
        self.func_run()

    def func_run(self):
        """Initialise the session and start the search loop."""
        self.func_init_s()
        self.func_req_search_view()

    def param_user_agent(self):
        """Return the pool of User-Agent strings used for rotation."""
        return [
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]

    def func_init_s(self):
        """Create a fresh requests session seeded with real browser cookies."""
        time.sleep(5)  # brief pause before (re)connecting, to look less bot-like
        self.s = requests.session()
        for c in self.func_webdriver():
            self.s.cookies.set(c['name'], c['value'])

    def func_webdriver(self):
        """Visit taobao.com with PhantomJS and return the cookies it receives.

        NOTE(review): PhantomJS support was removed from modern Selenium;
        consider migrating to headless Chrome/Firefox.

        :return: list of cookie dicts from ``get_cookies()``
        """
        dcap = dict(DesiredCapabilities.PHANTOMJS)
        dcap["phantomjs.page.settings.userAgent"] = (
            random.choice(self.param_user_agent())
        )
        _dr = webdriver.PhantomJS(
            executable_path=r'C:\Users\lxy\AppData\Local\Programs\phantomjs-2.1.1-windows\bin\phantomjs.exe',
            desired_capabilities=dcap)
        try:
            _dr.get('https://www.taobao.com/')
            return _dr.get_cookies()
        finally:
            # BUGFIX: the original never quit the driver, leaking one
            # PhantomJS process per cookie refresh.
            _dr.quit()

    def func_req(self, url, data):
        """Issue a GET request, or hand over to a new thread when blocked.

        :param url: base search URL
        :param data: query-string parameters
        :return: the list of <script> tags found in the response head, or
                 ``None`` when the response carried none (i.e. we were
                 blocked) and a replacement thread has been started.
        """
        # BUGFIX: the original reset ``self.req_num = 0`` on every call and
        # immediately tested it, so the referer-setting branch was dead code.
        if self.req_num > 0 and self.req_url:
            # After the first request, present the previous URL as referer.
            self.s.headers.update({'referer': self.req_url})
        req = self.s.get(url=url, params=data,
                         timeout=random.choice(range(80, 180)), stream=True)
        self.req_num += 1
        self.req_url = req.url
        _bs = BeautifulSoup(req.content, "html5lib")
        _scripts = _bs.head.find_all('script')
        if len(_scripts) == 0:
            # Blocked: spawn a fresh thread (new session + cookies) with the
            # original keyword and shop name, and signal the caller to stop.
            t = TaoBao(self.k, self.shop)
            t.start()
            return None
        return _scripts

    def func_req_search_view(self):
        """Main loop: page through the search results for the keyword."""
        search_headers = {
            "User-Agent": "%s" % random.choice(self.param_user_agent()),
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
            "referer": "https://www.taobao.com/",
        }

        self.s.headers.update(search_headers)
        _search_url = 'https://s.taobao.com/search?'
        page = 0
        page_limit = 100  # total pages; refined from the first response
        page_size = 44    # items per page; refined from the first response
        default_url = []  # page numbers whose response could not be parsed
        while page < page_limit:
            _num = page * page_size
            _search_data = {'q': self.k, 'imgfile': '', 'js': '1',
                            'stats_click': 'search_radio_all%3A1', 'ie': 'utf8',
                            'sort': 'sale-desc', 's': '%s' % _num}
            _scripts = self.func_req(_search_url, _search_data)
            if _scripts is None:
                # BUGFIX: the original subscripted None, caught the TypeError
                # and `continue`d without incrementing ``page`` — an infinite
                # loop. A replacement thread has been started; stop this one.
                return
            for _string in _scripts[-1].stripped_strings:
                print('页码：%s，数据获取成功' % page)
                _r = re.search(r'{"pageName":(.*?)\n', _string)
                if _r:
                    data = json.loads(_r.group().rstrip(';\n'))
                    if _num == 0:
                        pager = data['mods']['sortbar']['data']['pager']
                        # BUGFIX: the original expression
                        # ``100 if int(totalPage) else int(totalPage)``
                        # yielded 100 for any truthy value and 0 otherwise.
                        # The intent is "total pages, capped at 100".
                        page_limit = min(100, int(pager['totalPage']))
                        page_size = int(pager['pageSize'])
                        self.func_result(data, page)
                    else:
                        self.func_result(data, page)
                        time.sleep(random.choice(range(3, 5)))
                else:
                    default_url.append(page)
            page += 1
            time.sleep(2)
        print('-------结束查询-------')
        print('3分钟后自动退出！')
        time.sleep(180)
        exit()

    def func_result(self, items, page):
        """Extract item fields from a result JSON and append them to file.

        :param items: parsed search-result JSON
        :param page: current page number (kept for caller symmetry/logging)
        :return: None
        """
        if not items['mods']['itemlist']['status']:
            print('参数错误')
            return
        _item_info = items['mods']['itemlist']['data']['auctions']  # item records
        # BUGFIX: use a context manager instead of keeping an open handle on
        # ``self.file`` (the original leaked the handle on any exception).
        with open('shop_info.txt', 'a+', encoding='UTF-8') as f:
            for item in _item_info:
                nick = item['nick']                 # shop name
                raw_title = item['raw_title']       # item title
                view_price = item['view_price']     # lowest promo price, e.g. 79.90 of "¥79.90-99.00"
                view_fee = item['view_fee']         # shipping fee
                item_loc = item['item_loc']         # ship-from location
                view_sales = item['view_sales']     # number of buyers
                comment_count = 0 if not item['comment_count'] else int(item['comment_count'])  # review count
                detail_url = item['detail_url']     # item detail page URL
                isTmall = item['shopcard']['isTmall']  # Tmall shop flag
                _result = '店铺名：%s，商品标题：%s，地址：%s，价格：%s，运费：%s，发货地址：%s，收货人数量：%s，评论数：%s，是否天猫店：%s' % (nick, raw_title, detail_url, view_price, view_fee, item_loc, view_sales, comment_count, isTmall)
                print(_result)
                # BUGFIX: append a newline — the original concatenated every
                # record into one unbroken line.
                f.write(_result + '\n')
                print('继续查找，请等待...')

def _main():
    """Interactive entry point: prompt for keyword and shop, then search."""
    print('版本号：' + version)
    keyword = input('请输入搜索商品关键字：')
    shop_nick = input('请输入店铺首字：')
    print('开始搜索，整个过程预计持续2分钟，请稍候！')
    spider = TaoBao(keyword, shop_nick)
    spider.func_run()


if __name__ == '__main__':
    _main()