# -*- coding: utf-8 -*-
# @Time    : 2020/1/13 16:43
# @Author  : Damn7Kx
# @Software: PyCharm
import ast
import datetime
import re
from urllib.parse import urljoin

import scrapy
from scrapy.selector import Selector
from w3lib.html import remove_tags

from NewsSpider.items import NewsItem
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_pubtime, extract_html
from NewsSpider.tools.redis_db import Redis_DB
from NewsSpider.tools.utils import Utils


class YangGuangShuoFa(scrapy.Spider):
    """Spider for the 阳光说法 app news API (falv.sjdstv.cn).

    Crawls two categories (text articles and video articles) from the
    paginated ``data.asp`` endpoint, follows each article link, and emits
    a ``NewsItem`` through the Kafka pipeline.
    """

    name = 'Ygsf'
    # API category identifiers: text search / video search.
    types = ['文字寻找', '视频寻找']
    redis = Redis_DB()

    t = Times()
    # Page size expected by the data.asp pagination API ("amount" param).
    PAGE_SIZE = 8
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 7.1.2; G011A Build/N2G48H; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept-Encoding': ' gzip, deflate',
        'Connection': 'keep-alive'
    }
    custom_settings = {
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def _list_request(self, category, offset):
        """Build the paginated POST request for *category* starting at *offset*.

        Both categories hit the same endpoint with the same body shape, so a
        single helper replaces the duplicated if/else branches of the original.
        """
        url = f"http://falv.sjdstv.cn/data.asp?leixing={category}"
        body = f"last={offset}&amount={self.PAGE_SIZE}"
        return scrapy.Request(url, method='POST', headers=self.headers, body=body,
                              dont_filter=True,
                              meta={"type": category, "number": offset},
                              callback=self.parse_list)

    def start_requests(self):
        """Kick off page 0 of every category."""
        for category in self.types:
            yield self._list_request(category, 0)

    def parse_list(self, response):
        """Parse one page of the listing API and schedule detail requests.

        The endpoint returns a literal list of dicts; text entries carry the
        link in ``aaa`` and the publish date in ``ccc``, video entries carry
        the link in ``InfoPicture`` (detail fetched via xy.asp by article id).
        """
        category = response.meta['type']
        offset = response.meta['number']
        text = response.text.replace('\r', '').replace('\n', '').replace('\t', ' ')
        if not text:
            return
        # SECURITY: the original used eval() on the raw HTTP response, which
        # executes arbitrary code from the network.  literal_eval parses the
        # same list/dict literals but cannot run code.
        try:
            entries = ast.literal_eval(text)
        except (ValueError, SyntaxError):
            return
        # Only paginate while the API keeps returning results; the original
        # scheduled the next page unconditionally and never terminated.
        if entries:
            yield self._list_request(category, offset + self.PAGE_SIZE)
        for entry in entries:
            if category == '文字寻找':
                select = Selector(text=entry['aaa'])
                href = select.css("a::attr(href)").extract_first()
                # Guard BEFORE urljoin: urljoin(base, None) returns base, so
                # the original's post-hoc "url is None" check never fired and
                # a missing href re-requested the listing page itself.
                if href is None:
                    continue
                url = urljoin(response.url, href)
                title = select.css("a::attr(title)").extract_first()
                meta = {'title': title, 'url': url, 'pubdate': entry['ccc']}
                yield scrapy.Request(url, headers=self.headers,
                                     dont_filter=True, meta=meta)
            else:
                select = Selector(text=entry['InfoPicture'])
                href = select.css("a::attr(href)").extract_first()
                if href is None:
                    continue
                url = urljoin(response.url, href)
                title = select.css("a::attr(title)").extract_first()
                # The article id embedded in the link selects the detail page;
                # skip links without one instead of crashing on .group(1).
                match = re.search(r'id=(\d+)', url)
                if match is None:
                    continue
                content_url = f'http://falv.sjdstv.cn/xy.asp?c=aqjj&id={match.group(1)}'
                meta = {'title': title, 'url': url}
                yield scrapy.Request(content_url, headers=self.headers,
                                     dont_filter=True, meta=meta)

    def parse(self, response):
        """Parse an article detail page into a NewsItem.

        Deduplicates by URL hash against Redis, resolves the publish date
        (listing metadata first, then the page itself, then "now"), and
        extracts the article HTML/plain-text content.
        """
        item = NewsItem()
        url = response.meta['url']
        # Renamed from `id` to avoid shadowing the builtin.
        doc_id = Utils.url_hash(url)
        item['url'] = url
        item['id'] = doc_id
        if self.redis.check_exist_2("wenzhangquchong", doc_id, '') == 0:
            # Duplicate-check key already present in Redis: skip.
            print('该id:%s已存在' % doc_id)
            return
        item['title'] = response.meta['title']
        # Video-branch requests carry no 'pubdate' in meta (.get → None);
        # fall back to the date embedded in the page.
        pubdate = response.meta.get('pubdate')
        if pubdate is None:
            pubdate = extract_pubtime(response.text)
        try:
            item['pubdate'] = str(self.t.datetimes(pubdate))
        except Exception:
            # Normalization failed on the listing date: retry with the page
            # date, and finally with the current time.
            fallback = extract_pubtime(response.text)
            if fallback is None:
                fallback = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            item['pubdate'] = str(self.t.datetimes(fallback))
        try:
            html = extract_html(response.text)
        except Exception:
            html = ''
        item['content'] = remove_tags(html)
        item['html'] = html
        item['author'] = ''
        item['formats'] = "app"
        item['dataSource'] = '阳光说法'
        item['serchEnType'] = "阳光说法"
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item


