#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2019/4/16  9:42
# @Author: 余浪人
# @email: yulangren520@gmail.com

import requests, time,re
from lxml import etree
from apps.lib.public import get_random
from apps.spiders import publish_data, save_img
from fake_useragent import UserAgent


class Xpath_rule():
    """XPath-driven article crawler.

    Fetches listing pages, follows each detail link, extracts fields with the
    configured XPath rules and publishes each record via ``publish_data``.
    After every ``restrictive`` detail pages it sleeps ``set_time`` seconds
    (``restrictive == 0`` disables throttling).
    """
    restrictive = 0  # throttle threshold: detail pages crawled per sleep cycle

    def __init__(self, cls_id, list_rule, link_rule, list_img_rule, title_rule, content_rule, restrictive, set_time,
                 img_save_path=None, keys_rule=None, time_rule=None, author_rule=None, source_rule=None, goal_url=None,
                 url_relative=None, replace_content=None, encode_code='utf-8'):
        self.cls_id = cls_id                    # category id published as "superior"
        self.list_rule = list_rule              # XPath selecting the item nodes on a listing page
        self.link_rule = link_rule              # XPath (relative to an item node) for the detail URL
        self.list_img_rule = list_img_rule      # XPath for the list thumbnail URL; falsy = no image
        self.title_rule = title_rule            # XPath for the article title
        self.content_rule = content_rule        # XPath for the article body fragments
        self.restrictive = restrictive          # throttle threshold (0 = never sleep)
        self.set_time = set_time                # seconds to sleep when the threshold is hit
        self.img_save_path = img_save_path      # local directory passed to save_img
        self.keys_rule = keys_rule              # optional XPath for keywords
        self.time_rule = time_rule              # optional XPath for the publish time
        self.author_rule = author_rule          # optional XPath for the author
        self.source_rule = source_rule          # optional XPath for the source
        self.goal_url = goal_url                # site base URL, prepended when url_relative is set
        self.url_relative = url_relative        # truthy when the site uses relative links
        self.replace_content = replace_content  # newline-separated "old||new" replacement pairs
        self.encode_code = encode_code          # charset used to decode the raw response bytes
        self.UserAgent = UserAgent()            # random User-Agent generator
        self._crawled = 0                       # detail pages processed since the last sleep

    def get_response(self, url):
        '''
        Downloader: GET ``url`` with a random User-Agent.

        :param url: absolute URL to fetch
        :return: raw response body as bytes
        '''
        headers = {'User-Agent': self.UserAgent.random}
        return requests.get(url, headers=headers).content

    def parse_detail(self, response, list_img_link):
        '''
        Parse one detail page and publish the extracted record.

        :param response: raw page bytes (decoded with ``self.encode_code``)
        :param list_img_link: thumbnail URL taken from the listing page ('' if none)
        :return: None (the record is sent to ``publish_data``)
        '''
        # Throttle: sleep after every ``restrictive`` detail pages.
        # BUG FIX: the original compared restrictive == int(restrictive),
        # which is always true for ints, so it slept on *every* page.
        if self.restrictive and self._crawled >= self.restrictive:
            self._crawled = 0
            time.sleep(self.set_time)
        html = etree.HTML(response.decode(self.encode_code))
        title = ''.join(html.xpath(self.title_rule)).strip()  # title
        # Join body fragments with <p> so paragraph breaks survive extraction.
        content = '<p>'.join(st.strip() for st in html.xpath(self.content_rule)).strip()
        # BUG FIX: the original called list.join(','), which raises AttributeError.
        keys_word = ','.join(html.xpath(self.keys_rule)).strip() if self.keys_rule else ''  # keywords
        data_time = ''.join(html.xpath(self.time_rule)).strip() if self.time_rule else ''  # publish time
        # BUG FIX: indexing [0] raised IndexError when the rule matched nothing.
        author = ''.join(html.xpath(self.author_rule)).strip() if self.author_rule else ''  # author
        source = ''.join(html.xpath(self.source_rule)).strip() if self.source_rule else ''  # source
        img_url = save_img(list_img_link, self.img_save_path)
        if self.replace_content:
            # Each line is "old||new".
            # BUG FIX: the original discarded both the splitlines() result and
            # the str.replace() return value, so replacements never applied.
            for rep in self.replace_content.splitlines():
                old, sep, new = rep.partition('||')
                if sep:  # skip malformed lines instead of aborting
                    content = content.replace(old, new)
        # Publish only when both key fields were extracted.
        if title and content:
            publish_data({
                "title": title, "superior": self.cls_id, "content": content, "thumb_img": img_url,
                "create_time": data_time, "author": author, "keyword": keys_word, "source": source,
                "promulgator": "管理员", "weight": get_random(1, 999), "clicks": get_random(1, 9999)
            })
        self._crawled += 1

    def parse_list(self, response):
        '''
        Parse a listing page and crawl every detail link found on it.

        :param response: raw listing page bytes
        :return: None
        '''
        html = etree.HTML(response.decode(self.encode_code))
        # Renamed loop target from ``list`` to avoid shadowing the builtin.
        for item in html.xpath(self.list_rule):
            link = ''.join(item.xpath(self.link_rule))
            if self.url_relative:
                link = self.goal_url + link  # join relative detail URL with the site base
            if self.list_img_rule:
                img_link = ''.join(item.xpath(self.list_img_rule))
                if self.url_relative:
                    img_link = self.goal_url + img_link  # relative thumbnail URL
            else:
                img_link = ''  # listing has no thumbnail
            self.parse_detail(self.get_response(link), img_link)

    def start_requests(self, url_data):
        '''
        Entry point: crawl one listing URL or a list of listing URLs.

        :param url_data: a URL string or a list of URL strings
        :return: None

        BUG FIX: the original one-liner iterated the *string* character by
        character, re-crawling ``url_data`` once per character when given a
        plain URL.
        '''
        urls = url_data if isinstance(url_data, list) else [url_data]
        for url in urls:
            self.parse_list(self.get_response(url))


class Re_rule():
    """Regex-driven article crawler.

    Same workflow as ``Xpath_rule`` but every rule is a regular expression
    applied with ``re.findall`` (DOTALL). After every ``restrictive`` detail
    pages it sleeps ``set_time`` seconds (``restrictive == 0`` disables
    throttling).
    """
    restrictive = 0  # throttle threshold: detail pages crawled per sleep cycle

    def __init__(self, cls_id, list_rule, link_rule, list_img_rule, title_rule, content_rule, restrictive, set_time,
                 img_save_path=None, keys_rule=None, time_rule=None, author_rule=None, source_rule=None, goal_url=None,
                 url_relative=None, replace_content=None, encode_code='utf-8'):
        self.cls_id = cls_id                    # category id published as "superior"
        self.list_rule = list_rule              # regex capturing the listing section of the page
        self.link_rule = link_rule              # regex capturing detail URLs inside that section
        self.list_img_rule = list_img_rule      # regex for thumbnail URLs; falsy = no image
        self.title_rule = title_rule            # regex for the article title
        self.content_rule = content_rule        # regex for the article body fragments
        self.restrictive = restrictive          # throttle threshold (0 = never sleep)
        self.set_time = set_time                # seconds to sleep when the threshold is hit
        self.img_save_path = img_save_path      # local directory passed to save_img
        self.keys_rule = keys_rule              # optional regex for keywords
        self.time_rule = time_rule              # optional regex for the publish time
        self.author_rule = author_rule          # optional regex for the author
        self.source_rule = source_rule          # optional regex for the source
        self.goal_url = goal_url                # site base URL, prepended when url_relative is set
        self.url_relative = url_relative        # truthy when the site uses relative links
        self.replace_content = replace_content  # newline-separated "old||new" replacement pairs
        self.encode_code = encode_code          # charset the target site declares
        self.UserAgent = UserAgent()            # random User-Agent generator
        self._crawled = 0                       # detail pages processed since the last sleep

    def get_response(self, url):
        '''
        Downloader: GET ``url`` with a random User-Agent.

        :param url: absolute URL to fetch
        :return: response body decoded to str using ``self.encode_code``
        '''
        headers = {'User-Agent': self.UserAgent.random}
        resp = requests.get(url, headers=headers)
        # Decode with the configured charset rather than requests' guess,
        # so ``encode_code`` is actually honored.
        resp.encoding = self.encode_code
        return resp.text

    def parse_detail(self, response, list_img_link):
        '''
        Parse one detail page and publish the extracted record.

        :param response: page source as str
        :param list_img_link: thumbnail URL taken from the listing page ('' if none)
        :return: None (the record is sent to ``publish_data``)
        '''
        # Throttle: sleep after every ``restrictive`` detail pages.
        # BUG FIX: the original compared restrictive == int(restrictive),
        # which is always true for ints, so it slept on *every* page.
        if self.restrictive and self._crawled >= self.restrictive:
            self._crawled = 0
            time.sleep(self.set_time)
        # BUG FIX: the original did response.encode(...), producing bytes;
        # re.findall with str patterns against bytes raises TypeError.
        html = response
        title = ''.join(re.findall(self.title_rule, html, flags=re.S)).strip()  # title
        # Join body fragments with <p> so paragraph breaks survive extraction.
        content = '<p>'.join(st.strip() for st in re.findall(self.content_rule, html, flags=re.S)).strip()
        keys_word = ','.join(re.findall(self.keys_rule, html, re.S)).strip() if self.keys_rule else ''  # keywords
        data_time = ''.join(re.findall(self.time_rule, html, re.S)).strip() if self.time_rule else ''  # publish time
        # BUG FIX: indexing [0] raised IndexError when the rule matched nothing.
        author = ''.join(re.findall(self.author_rule, html, re.S)).strip() if self.author_rule else ''  # author
        source = ''.join(re.findall(self.source_rule, html, re.S)).strip() if self.source_rule else ''  # source
        img_url = save_img(list_img_link, self.img_save_path)
        if self.replace_content:
            # Each line is "old||new".
            # BUG FIX: the original discarded both the splitlines() result and
            # the str.replace() return value, so replacements never applied.
            for rep in self.replace_content.splitlines():
                old, sep, new = rep.partition('||')
                if sep:  # skip malformed lines instead of aborting
                    content = content.replace(old, new)
        # Publish only when both key fields were extracted.
        if title and content:
            publish_data({
                "title": title, "superior": self.cls_id, "content": content, "thumb_img": img_url,
                "create_time": data_time, "author": author, "keyword": keys_word, "source": source,
                "promulgator": "管理员", "weight": get_random(1, 999), "clicks": get_random(1, 9999)
            })
        self._crawled += 1

    def parse_list(self, response):
        '''
        Parse a listing page and crawl every detail link found on it.

        :param response: listing page source as str
        :return: None
        '''
        # BUG FIX: the original encoded the str to bytes, breaking re.findall.
        section = ''.join(re.findall(self.list_rule, response, flags=re.S))  # listing section
        links = re.findall(self.link_rule, section, flags=re.S)  # detail URLs
        # BUG FIX: re.findall(None, ...) crashed when no image rule was set,
        # and zip() with an empty image list silently dropped every link.
        if self.list_img_rule:
            imgs = re.findall(self.list_img_rule, section, flags=re.S)
        else:
            imgs = [''] * len(links)  # no thumbnails on this listing
        for url, img in zip(links, imgs):
            link = self.goal_url + url if self.url_relative else url  # detail URL
            if img and self.url_relative:
                img_link = self.goal_url + img  # join relative thumbnail URL
            else:
                img_link = img  # absolute URL or '' when absent
            self.parse_detail(self.get_response(link), img_link)

    def start_requests(self, url_data):
        '''
        Entry point: crawl one listing URL or a list of listing URLs.

        :param url_data: a URL string or a list of URL strings
        :return: None

        BUG FIX: the original one-liner iterated the *string* character by
        character, re-crawling ``url_data`` once per character when given a
        plain URL.
        '''
        urls = url_data if isinstance(url_data, list) else [url_data]
        for url in urls:
            self.parse_list(self.get_response(url))
