# -*- coding: utf-8 -*-
import csv
import json
import scrapy
import requests
from lxml import html as htm

# Browser-like User-Agent so the store does not reject the requests
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"

class AppSpider(scrapy.Spider):
    """Crawl Xiaomi's app store (app.mi.com) and append one CSV row per
    app to ``app.csv``.

    ``start_requests`` discovers category ids on the front page, writes
    the CSV header, and yields one listing-API request per category;
    ``parse`` reads the JSON listing, fetches each app's detail page,
    and appends the scraped rows.
    """
    name = 'app'

    # Column headers for app.csv, in the same order rows are appended.
    CSV_HEADER = ['name', 'icon', 'type', 'package', 'company', 'size',
                  'version', 'features', 'update_time']

    def start_requests(self):
        """Yield one listing-API request per discovered category.

        Yields:
            scrapy.Request: page 0 of the category listing API, with a
            browser User-Agent, parsed by :meth:`parse`.
        """
        # The category list is static HTML on the front page, so a plain
        # HTTP client is enough here. Send the same UA the spider uses.
        front = requests.get("http://app.mi.com/",
                             headers={"User-Agent": USER_AGENT})
        page = htm.fromstring(front.text)

        # hrefs look like "/category/2"; the category id is the last
        # path component.
        hrefs = page.xpath('//ul[@class="category-list"]/li/a/@href')
        category_ids = [href.split("/")[-1] for href in hrefs]

        # NOTE(review): only the first category is crawled ([0:1]) —
        # presumably a deliberate load limit; widen the slice to crawl all.
        urls = [
            "http://app.mi.com/categotyAllListApi?page=0&categoryId={}&pageSize=20".format(cid)
            for cid in category_ids[0:1]
        ]

        # The file is opened in append mode, so only write the header
        # when the file is new/empty — the original rewrote the header
        # on every run, producing duplicate header rows.
        with open('app.csv', 'a+', encoding='utf-8', newline="") as f:
            f.seek(0, 2)  # seek to end; tell() == 0 means empty file
            if f.tell() == 0:
                csv.writer(f).writerow(self.CSV_HEADER)

        for url in urls:
            yield scrapy.Request(url=url,
                                 headers={"User-Agent": USER_AGENT},
                                 callback=self.parse)

    def parse(self, response):
        """Parse one category-listing JSON response and append rows.

        For each app in the listing, fetches its detail page with
        ``requests`` + lxml and appends a row to ``app.csv``.

        Args:
            response: scrapy response whose body is the listing JSON
                (expects a top-level "data" list of app dicts).
        """
        if not response.body:
            return

        result = json.loads(response.body)
        rows = []
        for item in result["data"]:
            icon = item["icon"]
            # Strip the literal HTML middot entity the store embeds.
            name = item["displayName"].replace("&middot;", "")
            type_id = item["level1CategoryName"]
            package = item["packageName"]

            detail = requests.get(
                "http://app.mi.com/details?id={}".format(package),
                headers={"User-Agent": USER_AGENT})
            page = htm.fromstring(detail.text)

            # First xpath hit or "" — the original indexed [0]
            # unconditionally for four of these fields and crashed with
            # IndexError when a detail page lacked one (it already
            # guarded `features` this way; now all five are consistent).
            def first(xpath):
                hits = page.xpath(xpath)
                return hits[0] if hits else ""

            company = first('//div[@class="intro-titles"]/p[1]/text()')
            size = first('//ul[@class=" cf"]/li[2]/text()')
            version = first('//ul[@class=" cf"]/li[4]/text()')
            update_time = first('//ul[@class=" cf"]/li[6]/text()')
            features = first('//div[@class="app-text"]/p[@class="pslide"][2]/text()')

            rows.append([name, icon, type_id, package, company, size,
                         version, features, update_time])

        # Context manager guarantees the handle is closed even if a
        # write fails (the original used open/close with no try/finally).
        with open('app.csv', 'a+', encoding='utf-8', newline="") as f:
            csv.writer(f).writerows(rows)
