import scrapy
from scrapy.http import Request
import json
from appInfo.items import AppinfoItem
import requests
import time


class AppInfoSpider(scrapy.Spider):
    """Crawl app metadata from the Tencent MyApp store (sj.qq.com).

    Pipeline per app name read from a local text file:
      1. hit the search AJAX endpoint to resolve the name to a package name,
      2. fetch the detail page and scrape the metadata fields,
      3. fetch the comment-count endpoint for the total comment number,
    then emit one ``AppinfoItem`` per app.
    """

    name = 'app_info'
    allowed_domains = ['qq.com']
    start_urls = ['http://sj.qq.com/myapp/']
    # Search endpoint: resolves an app-name keyword to package info (JSON).
    ajax = "http://sj.qq.com/myapp/searchAjax.htm?kw="
    # App detail page (HTML).
    app_info_url = "http://sj.qq.com/myapp/detail.htm?apkName="
    # Comment-count endpoint (JSON), e.g. apkName=com.tencent.token
    comment_url = "http://sj.qq.com/myapp/app/comment.htm?apkName="  # com.tencent.token
    # Custom HTTP headers (session cookie + desktop UA) sent to the comment API.
    head = {
        'Cookie': "session_uuid=131faa31-652d-423a-b0fd-5d4fa05fc827; pgv_pvid_new=_1264f3f631e; pac_uid=0_5a93c635bd7ab; pgv_pvi=4741056512; ts_refer=www.sogou.com/link; sjqqcomUV=sjqqcomUV; JSESSIONID=aaaJqQghIl9Klh7rRNChw; pgv_si=s501280768; pgv_info=ssid=s1571984448; ts_last=sj.qq.com/myapp/detail.htm; pgv_pvid=2713538380; ts_uid=8925701349",
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0"
    }

    # Input file: one app name per line, UTF-8 encoded.
    crawl_app_name = "E:\\PycharmPerProject\\appInfo\\app_name\\app_name_40001_50887.txt"

    def start_requests(self):
        """Yield one search request per (non-blank) app name in the input file."""
        with open(AppInfoSpider.crawl_app_name, 'rb') as f:
            for line in f:
                param = line.strip().decode('utf-8')
                if not param:
                    # Skip blank lines so we never request the bare search URL.
                    continue
                self.logger.debug("Searching: %s", AppInfoSpider.ajax + param)
                yield Request(AppInfoSpider.ajax + param, meta={'appName': param})

    def parse(self, response):
        """Parse the search-AJAX JSON and follow the first hit's detail page.

        Skips the app (with a warning) when the search returns no results,
        instead of raising IndexError as the naive ``items[0]`` access would.
        """
        js = json.loads(response.body.decode('utf-8'))
        items = (js.get('obj') or {}).get('items') or []
        if not items:
            self.logger.warning("No search result for app name: %s",
                                response.meta['appName'])
            return
        pkg_name = items[0].get('pkgName')
        yield Request(AppInfoSpider.app_info_url + pkg_name,
                      meta={'appName': response.meta['appName'], 'pkgName': pkg_name},
                      callback=self.get_app_info_parse)

    def get_app_info_parse(self, response):
        """Scrape the detail page, fetch the comment count, and emit the item.

        ``comment_number`` falls back to the sentinel string '异常' ("error")
        when the comment endpoint cannot be reached or parsed;
        ``update_time`` falls back to the raw extracted value when the
        epoch-seconds timestamp cannot be converted.
        """
        # NOTE(review): this synchronous requests.get blocks Scrapy's async
        # engine; chaining a scrapy Request with a callback would be cleaner.
        comment_number_temp = '异常'
        try:
            # BUG FIX: the headers dict was previously passed as ``params=``,
            # which sent Cookie/User-Agent as URL query parameters instead of
            # HTTP headers (so the session cookie was never actually sent).
            t = requests.get(AppInfoSpider.comment_url + response.meta['pkgName'],
                             headers=AppInfoSpider.head).text
            comment_number_temp = json.loads(t).get('obj').get('total')
        except Exception as e:
            self.logger.error("Failed to fetch comment count: %s (url: %s)",
                              e, AppInfoSpider.comment_url + response.meta['pkgName'])

        # The publish time is an epoch-seconds attribute on the second
        # det-othinfo-data div; convert it to YYYY-MM-DD.
        update_temp = response.xpath("//div[@class='det-othinfo-data']")[1].xpath("./@data-apkpublishtime").extract()
        try:
            get_update = time.strftime("%Y-%m-%d", time.localtime(int(update_temp[0])))
        except Exception as e:
            # Keep the raw extracted list so the item still records something.
            get_update = update_temp
            self.logger.error("Timestamp conversion failed: %s", e)
        yield AppinfoItem(
            src_app_name=response.meta['appName'],
            search_app_name=response.xpath("//div[@class='det-name-int']/text()").extract(),
            search_pkg_name=response.meta['pkgName'],
            user_score=response.xpath("//div[@class='com-blue-star-num']/text()").extract(),
            downloads=response.xpath("//div[@class='det-ins-num']/text()").extract(),
            comment_number=comment_number_temp,
            version=response.xpath("//div[@class='det-othinfo-data']")[0].xpath('./text()').extract(),
            update_time=get_update,
            developer=response.xpath("//div[@class='det-othinfo-data']")[2].xpath('./text()').extract(),
            app_info=response.xpath("//div[@class='det-app-data-info']")[0].xpath('./text()').extract(),
            app_classification=response.xpath("//a[@class='det-type-link']/text()").extract()
        )