# -*- coding: utf-8 -*-
import time
import scrapy
import json

class SohuSpider(scrapy.Spider):
    """Crawl article listings from a set of Sohu channel feeds.

    For each channel the spider requests the listing API, builds one item
    per article, then chains two follow-up requests per article — one for
    the comment count, one for the page-view ("browse") count — before
    yielding the finished item.
    """

    name = 'sohu_channel'
    # Comment API: we only read totalCount from the response, so page 1
    # with a tiny page_size is enough.
    comment_url = 'https://api.interaction.sohu.com/api/comments/maincomments?source_id=mp_{}&page_no=1&page_size=3&reply_count=10&type=0'
    # Page-view API: answers with {"<articleId>": <pv>}.
    browse_url = 'https://v2.sohu.com/public-api/articles/pv?articleIds={}'

    def start_requests(self):
        """Yield one listing request per channel.

        Maps our internal ``site_classify_id`` to the channel's listing
        URL; the id travels in ``meta['item']`` so :meth:`parse` can tag
        every article with the right category.
        """
        urls = {
            '432': "https://v2.sohu.com/integration-api/mix/region/106?secureScore=50&page=1&size=24&pvId=1551678050735pwvrTrN&channel=45",  # funny
            '431': "http://v2.sohu.com/integration-api/mix/region/131?size=25&adapter=pc&secureScore=50&page=1&pvId=1551680591779rkPvvuY",  # entertainment
            '433': "https://v2.sohu.com/integration-api/mix/region/101?secureScore=50&page=1&size=24&pvId=1551680643055nPqcU5d&mpId=0",  # technology
            '437': "https://v2.sohu.com/integration-api/mix/region/93?secureScore=50&page=1&size=24&pvId=1551679476867DKVvZ5r&mpId=0&channel=13",  # history
            '439': "https://v2.sohu.com/integration-api/mix/region/103?secureScore=50&page=1&size=24&pvId=15516797204645mkyf49&channel=42",  # games
            '440': "https://v2.sohu.com/integration-api/mix/region/99?secureScore=50&page=1&size=24&pvId=1551680691753Swje7nB",  # food
            '441': "https://v2.sohu.com/integration-api/mix/region/95?secureScore=50&page=1&size=24&pvId=1551680731279c4013lw",  # fashion
            '442': "https://v2.sohu.com/integration-api/mix/region/5590?page=1&size=20&mpId=&client=1&pvId=1551680778432nTC8UPt",  # sports
            '443': "https://v2.sohu.com/integration-api/mix/region/113?secureScore=50&page=1&size=24&pvId=1551679783227SUbs9E8",  # health
            '444': "https://v2.sohu.com/integration-api/mix/region/100?secureScore=50&page=1&size=24&pvId=1551679844435WmyNeqc",  # travel
            '445': "https://v2.sohu.com/integration-api/mix/region/94?secureScore=50&page=1&size=24&pvId=1551680865828fOhD6L3",  # finance

            # '446': "https://m.focus.cn/ajax/index/news?cityId=87&pageNo=2&limit=10",  # real estate
            '434': "http://v2.sohu.com/public-api/feed?scene=CHANNEL&sceneId=25&page=1&size=20",  # education
            '436': "http://v2.sohu.com/public-api/feed?scene=CHANNEL&sceneId=26&page=1&size=20",  # parenting
            '438': "http://v2.sohu.com/public-api/feed?subId=0&scene=CHANNEL&sceneId=18&page=1&size=12",  # cars
        }
        for cate, url in urls.items():
            item = {'site_classify_id': cate, 'url': url}
            yield scrapy.Request(item['url'], meta={'item': item})

    def parse(self, response):
        """Dispatch on the two listing-API response shapes.

        ``integration-api`` responses wrap the article list in a ``data``
        key, while ``public-api`` feeds return a bare JSON array.  Both
        shapes are reduced to the same item dict by :meth:`_build_item`,
        then forwarded to the comment-count request.
        """
        cate = response.meta['item']['site_classify_id']
        payload = json.loads(response.text)
        if 'integration' in response.url:
            articles = payload['data']
        elif 'public' in response.url:
            articles = payload
        else:
            return  # unknown listing shape — nothing to extract
        for data in articles:
            item = self._build_item(data, cate)
            if item is None:
                continue  # no publish time: skip, as before
            comment_url = self.comment_url.format(item['url_id'])
            yield scrapy.Request(comment_url, callback=self.detail_comm, meta={'item': item})

    def _build_item(self, data, cate):
        """Turn one raw article record into an item dict.

        Returns ``None`` when the record carries no ``publicTime`` (such
        records were silently skipped by the original branches).
        """
        lc_time = data.get("publicTime")
        if not lc_time:
            return None
        item = {}
        # publicTime is epoch milliseconds.
        item['publish_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(lc_time / 1000))
        item['spider_time'] = time.strftime("%Y-%m-%d %X", time.localtime())
        item['author'] = data.get("authorName")
        item['site_id'] = "5"  # site id (Sohu)
        item['site_classify_id'] = cate
        item['url_id'] = data.get("id")
        item['authorId'] = data.get("authorId")
        # Article pages live at /a/<articleId>_<authorId>.
        item['url'] = 'http://www.sohu.com/a/{}_{}'.format(item['url_id'], item['authorId'])
        item['title'] = data.get("mobileTitle")
        return item

    def detail_comm(self, response):
        """Attach the comment count, then chain the page-view request."""
        item = response.meta['item']
        comm = json.loads(response.text)
        # 'data' may be null for articles without a comment thread; the
        # old `comm.get('data').get(...)` crashed with AttributeError.
        item['comment_count'] = (comm.get('data') or {}).get('totalCount')
        browse_url = self.browse_url.format(item['url_id'])
        yield scrapy.Request(browse_url, callback=self.detail_browse, meta={'item': item})

    def detail_browse(self, response):
        """Attach the page-view count and emit the finished item.

        The API answers with ``{"<articleId>": <pv>}``; parse it as JSON
        instead of the old ``text.split(':')`` hack, which broke on any
        extra ':' or whitespace in the payload.
        """
        item = response.meta['item']
        counts = json.loads(response.text)
        # str() keeps the historical string type of browse_count.
        item['browse_count'] = str(next(iter(counts.values()), ''))
        yield item
