# -*- coding: utf-8 -*-
import json
from collections import OrderedDict

import time
from bs4 import BeautifulSoup

import re
from pprint import pprint
from selenium import webdriver
import scrapy
from copy import deepcopy

import requests

class TencentSpider(scrapy.Spider):
    """Scrape the Tencent MyApp store (sj.qq.com).

    Pipeline: category menu -> paginated category listings ->
    per-app detail page -> comment JSON endpoint.  Each yielded item
    carries the category, app name/URL, download count, rating,
    version, update date, description and a list of comments.
    """
    name = 'tencent'
    allowed_domains = ['sj.qq.com']
    start_urls = ['https://sj.qq.com/myapp/category.htm?orgame=1']

    # Get categories and paginate each one.
    def parse(self, response):
        """Enumerate category links, then probe each category's listing
        pages with `requests`, yielding a scrapy.Request (handled by
        parse_app_list) for every page that actually contains apps.

        Pagination stops at the first empty page or on a network error.
        """
        menu_list = response.xpath('//ul[@class="menu"]/li[1]/ul/li')
        for li in menu_list:
            item = OrderedDict()
            item['classification'] = li.xpath('./a/text()').extract_first()
            item['url'] = li.xpath('./a/@href').extract_first()
            # A javascript:void(0) href marks the end of the real
            # category links in the menu.
            if item['url'] == 'javascript:void(0);':
                break
            page = 0
            while True:
                # pageContext advances in steps of pageSize: 0, 20, 40, ...
                # BUGFIX: the old code used (i+1)*20 for every page after
                # the first, so offset 20 (each category's second page)
                # was always skipped.
                item['url_list'] = ('https://sj.qq.com/myapp/category.htm' + item['url']
                                    + '&pageSize=20&pageContext={}'.format(page * 20))
                try:
                    html = requests.get(item['url_list']).text
                except requests.RequestException:
                    # The old bare `except: pass` retried forever on a
                    # persistent network failure; give up on this
                    # category instead.
                    break
                soup = BeautifulSoup(html, 'html.parser')
                # No app entries means we paged past the last page.
                if not soup.find_all('div', class_='app-info clearfix'):
                    break
                yield scrapy.Request(
                    url=item['url_list'],
                    callback=self.parse_app_list,
                    meta={"item": deepcopy(item)})
                page += 1

    # Get the app list for one listing page.
    def parse_app_list(self, response):
        """Extract each app's name and detail URL from a listing page and
        request the detail page (handled by parse_app_info)."""
        item = response.meta['item']
        app_list = response.xpath('//ul[@class="app-list clearfix"]/li')
        for li in app_list:
            item['app_name'] = li.xpath('./div/div/a[2]/@appname').extract_first()
            item['app_url'] = li.xpath('./div/div/a[1]/@href').extract_first()
            item['app_url'], item['apk_name'] = self.Handle_app_url(item['app_url'])
            yield scrapy.Request(
                url=item['app_url'],
                callback=self.parse_app_info,
                meta={'item': deepcopy(item)})

    def Handle_app_url(self, url):
        """Turn a listing href into an absolute detail-page URL.

        Returns ``(detail_url, query)`` where ``query`` is the href's
        query string; it doubles as the app identifier fed to the
        comment endpoint in parse_app_info.
        """
        query = url.split("?")[-1]
        return "https://sj.qq.com/myapp/detail.htm?" + query, query

    # Get the app detail page, then its comments, then yield the item.
    def parse_app_info(self, response):
        """Scrape download count, rating, version, update date and
        description from the detail page, fetch the comment JSON
        endpoint, and yield the completed item."""
        item = response.meta['item']
        item['app_download'] = response.xpath('//div[@class="det-ins-num"]/text()').extract_first()
        item['app_mark'] = response.xpath('//div[@class="com-blue-star-num"]/text()').extract_first()
        item['app_version'] = response.xpath('//div[@class="det-othinfo-data"][1]/text()').extract_first()
        # Publish time is a unix timestamp stored in a data attribute;
        # on a missing/non-numeric value keep the raw extract.
        item['app_update'] = response.xpath('//div[@id="J_ApkPublishTime"]/@data-apkpublishtime').extract_first()
        try:
            stamp = time.localtime(int(item['app_update']))
            item['app_update'] = time.strftime("%Y-%m-%d", stamp)
        except (TypeError, ValueError):
            pass
        item['app_info'] = response.xpath('//div[@class="det-app-data-info"]/text()').extract_first()
        if item['app_info'] is not None:
            # Strip carriage returns from the description text.
            item['app_info'] = item['app_info'].replace('\r', '')
        # Comments live on a separate JSON endpoint, keyed by the same
        # query string as the detail page (see Handle_app_url).
        item['app_comment_url'] = "https://sj.qq.com/myapp/app/comment.htm?{}&p=1&contextData=".format(item['apk_name'])
        content = []
        try:
            payload = json.loads(requests.get(item['app_comment_url']).content)
            for comment in payload['obj']['commentDetails']:
                content.append(comment['content'])
        except (requests.RequestException, ValueError, KeyError, TypeError):
            # Best effort: a missing or malformed payload just means
            # there are no comments to record.
            pass
        if content:
            item['app_coment'] = content
        else:
            item['app_coment'] = '暂无评论'
        yield item

