# -*- coding: utf-8 -*-
import scrapy
import re
from jsonpath import jsonpath
import time
import json
from scrapy_redis.spiders import RedisSpider



class NewsSpider(RedisSpider):
    """Distributed (scrapy_redis) spider for kaoyan news articles.

    Flow: ``parse`` reads the category index JSON and schedules paginated
    article-list requests; ``whi_run`` extracts article ids from a list page
    and schedules per-article requests; ``detail_new`` parses one article and
    hands it to the module-level ``save_new`` for persistence.
    """
    name = 'news'
    allowed_domains = ['mapi.kyboye.com']
    headers = {'Connection': 'Keep-Alive',
               'Accept-Encoding': 'gzip',
               'Cookie': 'FDX_auth=1325f4fb08CSx5Kl9%2BMj7LhbY2kBLJxBWHsoQQEqrZPyAEORYJ4fQQskQAZxyzMyE70SsSFNrqKEYILFC%2FHoiYIw; FDX_sid=w23565396_b58fe4c3506aa8e932aa70',
               'KY-APPVER': '3.4.1',
               'KY-APPCHG': 'ky_360',
               'KY-APPTYPE': '2',
               'KY-SPEID': '20115100',
               'KY-APPVERS': '96',
               'KY-SYSVER': '5.1.1',
               'KY-SCHID': '1033',
               'KY-YEAR': '2021',
               'KY-SYSDEV': 'samsung+SM-N960F',
               'KY-TOKEN': 'ff30493f5c39982f38a316b137713bda',
               'KY-UUID': '8be44003623e3edc5dc5480f9f65caee',
               'User-Agent': 'KaoYanBang AipBot/1.0 (KaoYanClub-Android/3.4.1; android/5.1.1; samsung+SM-N960F)'}
    redis_key = 'kaoyan'

    # Categories we actually crawl; every other category name is skipped.
    CRAWLED_CATEGORIES = {'报考经验', '政策新闻', '考研心路', '公共课复习', '复试调剂'}
    # Pagination: offsets 0, 20, ..., 4000 inclusive (matches the original
    # `while True: if skip <= 4000 ... skip += 20` loop).
    MAX_SKIP = 4000
    PAGE_SIZE = 20

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original assigned to the misspelled attribute
        # 'alllowed_domains', so the `-a domains=...` option never had any
        # effect; it also stored a one-shot `filter` iterator, which would
        # be exhausted after a single iteration.  Only override the class
        # default when at least one non-empty domain was supplied.
        domains = kwargs.pop("domains", "")
        extra_domains = [d for d in domains.split(',') if d]
        if extra_domains:
            self.allowed_domains = extra_domains
        super(NewsSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        """Parse the category index and schedule list-page requests.

        :param response: JSON response whose ``res.cate`` array holds
            ``{name, id}`` category records.
        :return: generator of :class:`scrapy.Request` for article-list pages.
        """
        jsobj = json.loads(response.body)
        # jsonpath() returns False (not []) when nothing matches.
        names = jsonpath(jsobj, '$.res.cate[*].name') or []
        ids = jsonpath(jsobj, '$.res.cate[*].id') or []
        for c_name, cid in zip(names, ids):
            if c_name not in self.CRAWLED_CATEGORIES:
                continue
            for skip in range(0, self.MAX_SKIP + 1, self.PAGE_SIZE):
                t = timestamp()
                if c_name == '政策新闻':
                    # This category queries fixed cid=201 with the category id
                    # as the sub-category (scid) — presumably the API's layout
                    # for policy news; TODO confirm against the app traffic.
                    # BUG FIX: the original hard-coded skip=0 and fed three
                    # arguments (cid, skip, t) into a template with only two
                    # slots, so pagination never advanced and `_t` received
                    # the skip value instead of the timestamp.
                    url = 'https://mapi.kyboye.com/news/article/list?cid=201&sid=1033&skip={}&wd=&scid={}&speid=20115100&psize=20&type=1&_t={}'.format(
                        skip, cid, t)
                else:
                    url = 'https://mapi.kyboye.com/news/article/list?cid={}&sid=1033&skip={}&wd=&scid=0&speid=20115100&psize=20&type=1&_t={}'.format(
                        cid, skip, t)
                yield scrapy.Request(
                    url=url,
                    callback=self.whi_run,
                    meta={'huiger': {'c_name': c_name}},
                    dont_filter=True
                )

    def whi_run(self, response):
        """Parse one article-list page and schedule a detail request per id.

        :param response: JSON response whose ``res.list`` array holds
            article records with an ``id`` field.
        :return: generator of :class:`scrapy.Request` for article details.
        """
        c_name = response.meta['huiger']['c_name']
        try:
            jsobj = json.loads(response.body)
        except ValueError:
            # Original code silently swallowed every error; keep the
            # best-effort behaviour for malformed bodies but say why.
            self.logger.warning('whi_run: non-JSON body for %s', response.url)
            return
        # jsonpath() returns False when no ids match (e.g. past the last page).
        id_list = jsonpath(jsobj, '$.res.list[*].id') or []
        for new_id in id_list:
            t = timestamp()
            url = 'https://mapi.kyboye.com/news/article/get?id={}&psize=10&skip=0&_t={}'.format(new_id, t)
            yield scrapy.Request(
                url=url,
                callback=self.detail_new,
                meta={'huiger': {'c_name': c_name}},
                dont_filter=True
            )

    def detail_new(self, response):
        """Parse one article's detail JSON and persist it via ``save_new``.

        :param response: JSON response with ``res.ctime`` (unix seconds),
            ``res.title``, ``res.copyfrom`` and ``res.content``.
        """
        c_name = response.meta['huiger']['c_name']
        jsobj = json.loads(response.body)
        ctime = jsonpath(jsobj, '$.res.ctime')[0]
        # BUG FIX (latent): the original bound this value to the name `time`,
        # shadowing the stdlib `time` module within the method.
        pub_time = fabu_time(int(ctime))
        title = ''.join(jsonpath(jsobj, '$.res.title'))
        new_dict = {
            'title': title,
            'time': pub_time,
            'copyfrom': jsonpath(jsobj, '$.res.copyfrom')[0],
            'content': jsonpath(jsobj, '$.res.content')
        }
        save_new(new_dict, c_name)


def wipe_out(string):
    """Strip markup from an HTML fragment, keeping only inter-tag text.

    Only the text sitting between a '>' and the next '<' survives, so input
    without any tags yields ''.  A newline is inserted after each Chinese
    full stop ('。') and all ASCII spaces are removed.

    :param string: raw HTML/markup fragment
    :return: cleaned plain-text string
    """
    fragments = re.findall(r'>(.*?)<', string)
    text = ''.join(fragments)
    return text.replace('。', '。\n').replace(' ', '')


def save_new(new_dict, dirname):
    """Append one article to ``<base>/<dirname>/<title>.txt``.

    The output directory is the hard-coded crawl target
    ``D:/code/考研帮app抓取/新闻`` and must already exist.

    :param new_dict: dict with 'title', 'time', 'copyfrom' and 'content'
        (content is a list of HTML fragments, cleaned via ``wipe_out``)
    :param dirname: category sub-directory name
    """
    try:
        # Drop quotes and replace path-hostile characters so the title can
        # double as a file name.
        filename = new_dict['title'].replace('“', '').replace('”', '').replace('/', '-').replace(':', '-')
        content = ''.join(new_dict['content'])
        con = wipe_out(content)
        string = new_dict['title'] + '\n' + new_dict['time'] + '      ' + new_dict['copyfrom'] + '\n' + con
        with open(r'D:/code/考研帮app抓取/新闻/{}/'.format(dirname) + filename + '.txt', 'a', encoding='utf-8') as f:
            f.write(string)
            print('{}保存完成。。。'.format(filename))
    except (OSError, KeyError, TypeError) as e:
        # BUG FIX: the original bare `except: pass` silently hid every
        # failure (missing directory, malformed record, ...).  Stay
        # best-effort — skip the article — but report what went wrong.
        print('保存失败: {}'.format(e))


def fabu_time(t):
    """Format a Unix timestamp (seconds) as 'YYYY-MM-DD HH:MM:SS' local time.

    :param t: Unix timestamp in seconds
    :return: formatted date-time string
    """
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))


def timestamp():
    """Return the current Unix time in whole milliseconds.

    Used as the `_t` cache-busting query parameter on API requests.

    :return: int, milliseconds since the epoch
    """
    return int(time.time() * 1000)
