import scrapy
import jsonpath
import json
import re
from kuaishou.items import KuaishouItem

class KuaishouSpider(scrapy.Spider):
    """Spider that POSTs to the Kuaishou "hot feed" API endpoint and yields
    one KuaishouItem per video entry found in each JSON response."""

    name = 'kuaishou'  # spider name
    allowed_domains = ["gifshow.com"]  # allowed crawl domains
    # start_urls = ['']  # unused: start_requests() is overridden below
    custom_settings = {
        "ITEM_PIPELINES" : {
           'kuaishou.pipelines.KuaishouPipeline': 300,
        },
    }

    # POST form payload for the hot-feed endpoint.
    # NOTE(review): 'sig' looks like a precomputed request signature tied to
    # the other fields/client_key — confirm it is still accepted by the API.
    data = {
        'type': "7",
        'page': "1",
        'coldStart': "false",
        "count": "20",
        "pv": "false",
        "id": "8",
        "refreshTimes": "7",
        "pcursor": '',
        "source": "1",
        "os": "android",
        "client_key": "3c2cd3f3",
        "sig": "826b419c614523cdccd6a42a7412abf1",
    }

    # Overridden start point: issue repeated POSTs to the feed endpoint.
    def start_requests(self):
        """Yield 1000 identical POST requests to the hot-feed API.

        The endpoint returns a fresh page of the feed on every call (the
        payload carries no advancing cursor), so the same request is simply
        repeated; dont_filter=True disables Scrapy's duplicate-URL filter.
        """
        # Hoisted out of the loop: the URL is loop-invariant.
        url = 'http://114.118.4.4/rest/n/feed/hot?app=0&lon=116.321451&did_gt=1532425841745&c=MYAPP_CPD&sys=ANDROID_4.4.2&mod=samsung%28SM-G955N%29&did=ANDROID_f0def12ad0237844&ver=5.8&net=WIFI&extId=18c2e22c9b4c905f4045257b36d6ce7d&country_code=CN&iuid=&appver=5.8.3.6495&max_memory=192&oc=MYAPP_CPD&ftt=&ud=0&language=zh-cn&lat=39.894986'
        for _ in range(1000):
            # FormRequest is Scrapy's way of sending a POST form body.
            yield scrapy.FormRequest(
                url=url,
                formdata=self.data,
                callback=self.parse_page,
                dont_filter=True
            )

    # Parse one JSON response from the feed API.
    def parse_page(self, response):
        """Extract every feed entry from the response and yield one
        KuaishouItem per video."""
        json_data = json.loads(response.text)  # decoded JSON payload

        feeds = jsonpath.jsonpath(json_data, '$..feeds')
        # jsonpath() returns False (not an empty list) when nothing matches;
        # iterating False would raise TypeError, so bail out early.
        if not feeds:
            return
        for feed_list in feeds:
            for entry in feed_list:
                self.logger.debug(entry)
                # BUG FIX: build a fresh item per entry. The original reused
                # a single KuaishouItem for every yield, so all yielded items
                # aliased one object and later entries overwrote earlier ones
                # wherever Scrapy buffered them.
                item = KuaishouItem()
                item["user_id"] = entry["user_id"]                     # user id
                item["user_name"] = self.split_n(entry["user_name"])   # user name
                item["mv_url"] = self.mv(entry)                        # media URL
                item["caption"] = self.split_n(entry["caption"])       # title
                item["music_name"] = self.split_n(self.music(entry))   # soundtrack name
                item["comment_count"] = entry["comment_count"]         # comments
                item["like_count"] = entry["like_count"]               # likes
                item["view_count"] = entry["view_count"]               # views
                yield item

    # Fetch the soundtrack name from a feed entry.
    def music(self, data):
        """Return the soundtrack name, or the literal string 'None' when the
        entry carries no 'soundTrack' field (kept as a string so split_n()
        can process it downstream)."""
        if 'soundTrack' in data:
            return data['soundTrack']["name"]
        return 'None'

    # Strip newlines and astral-plane characters (emoji) from a string.
    def split_n(self, ags):
        """Replace astral-plane characters with '??' and newline characters
        with '-' so values store safely on a single line."""
        try:
            # UCS-4 (wide) build: match astral-plane code points directly.
            highpoints = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            # UCS-2 (narrow) build: match surrogate pairs instead.
            highpoints = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')

        cleaned = highpoints.sub(u'??', ags)
        return cleaned.replace('\n', '-')

    # Resolve the playable media URL for a feed entry.
    def mv(self, data):
        """Return the video/audio URL for a feed entry, or None when no
        known source field is present."""
        if 'main_mv_urls' in data:
            return data["main_mv_urls"][0]["url"]

        # BUG FIX: the original tested `'audioUrls' in data` but then read
        # data['music']['audioUrls'] — the guard checked the wrong nesting
        # level, so this branch could raise KeyError. Check the real path.
        if 'music' in data and 'audioUrls' in data['music']:
            return data['music']['audioUrls'][0]["url"]

        # Guard 'atlas' too: the original indexed it unconditionally and
        # would KeyError on entries with ext_params but no atlas.
        if 'ext_params' in data and 'atlas' in data['ext_params']:
            url = data["ext_params"]["atlas"]["music"]
            return 'http://ali2.a.yximgs.com' + url

        # No recognized media field on this entry.
        return None