# -*- coding: UTF-8 -*-
# Project : biobase_python
# File : instrument.py
# IDE : PyCharm
# Author : 博科（鑫贝西）田聪
# Date : 2021/10/23 8:54
# PS : 数据信息网爬虫
# URL : https://www.b2b168.com/
import datetime
import random
import re
import time
from hashlib import sha1
from urllib import parse

import requests
from lxml import etree
from xpinyin import Pinyin

from new_tools.useragent import random_ua
from new_tools.toMysql import MySqlLink
from new_tools.data_clean import con_clean
from new_tools.times import Time

from new_tools.toRedis import Redis_DB
from new_tools.settings import MESSAGE

class b2bb168Spider:
    """Spider for article listings on https://www.b2b168.com/.

    Workflow:
      1. ``start_request`` pulls search keywords from MySQL and builds a
         pinyin-keyed listing URL per keyword.
      2. ``parse`` walks one listing page, dedups article URLs via Redis
         HSETNX, and recurses through pagination (``/l-<n>.html``).
      3. ``parse_content`` downloads each article, cleans the body text and
         inserts the item into MySQL.
    """

    # Shared Redis connection (class-level, created once at import time —
    # kept as in the original design).
    r = Redis_DB().r

    # Articles whose title contains any of these keywords are skipped
    # (tenders, procurement notices, training announcements, ...).
    SKIP_WORDS = ('招标', '投标', '采购', '购买', '修订',
                  '培训', '中标', '热搜', '购置', '名单')

    def __init__(self):
        self.mysql = MySqlLink()
        # Total page count of the current keyword's result list;
        # 0 until the first listing page has been parsed.
        self.maxpage = 0

    def start_request(self):
        """Entry point: crawl the listing for every configured search keyword."""
        p = Pinyin()
        keys = self.mysql.get_search_key('fa_art_search_key')
        for key in keys:
            self.key = key
            # Listing URLs are keyed by the pinyin of the Chinese keyword,
            # with the default "-" separators stripped.
            url = f'https://www.b2b168.com/news/k-{p.get_pinyin(key).replace("-", "")}/'
            self.parse(url)

    def parse(self, url):
        """Parse one listing page, enqueue unseen articles, follow pagination.

        :param url: absolute URL of a listing page (optionally ``/l-<n>.html``).
        """
        response = self.hrefget(url)
        # BUGFIX: the original compared `type(response) != 'dict'` (a type
        # object against a *string*), which is always True, so the error
        # sentinel was never detected.  On a failed download we stop here —
        # paginating from a failed page with a stale self.maxpage (the
        # original's accidental behavior via a swallowed NameError) is wrong.
        if isinstance(response, dict):
            return

        HTML = etree.HTML(response.text)

        for ul in HTML.xpath('//div[@class="newsL"]/ul[@class="list-item"]'):
            item = {'id': '', 'search_key': self.key}

            title = ''.join(ul.xpath('.//a[@class="title "]/text()'))
            if any(word in title for word in self.SKIP_WORDS):
                continue
            item['title'] = title

            href = parse.urljoin(url, ul.xpath('.//a[@class="title "]/@href')[0])
            item['url'] = href
            if '/news/' not in href:
                continue

            # HSETNX returns truthy only for URLs not seen before (Redis dedup).
            if self.r.hsetnx(MESSAGE, href, ''):
                sha = sha1()
                sha.update(item['url'].encode())
                item['id'] = sha.hexdigest()

                item['createtime'] = Time.str_to_stamp(
                    ul.xpath('.//span[@class="reply"]/text()')[0])

                time.sleep(0.5)  # throttle: be polite to the server
                self.parse_content(item, href)
            else:
                print(f'{href} - 已存在。')

        # Total page count, scraped from text like "共 12 页 ".
        try:
            self.maxpage = int(
                re.findall(r'共 (\d+) 页 ',
                           HTML.xpath('//div[@class="page"]/text()')[0])[0])
        except (IndexError, ValueError):
            # Page widget missing or in an unexpected format —
            # keep the previously seen maxpage.
            pass

        # Follow pagination: pages after the first carry a /l-<n>.html suffix.
        if self.maxpage > 0:
            if '/l-' in url:
                newpage = int(re.findall(r'/l-(\d+).html', url)[0])
            else:
                newpage = 1
                url += 'l-1.html'

            nextpage = newpage + 1
            if self.maxpage >= nextpage:
                nexturl = re.sub(r'/l-(\d+).html', f'/l-{nextpage}.html', url)
                self.parse(nexturl)

    def parse_content(self, item, href):
        """Download one article page, clean its body text and store the item.

        :param item: partially filled item dict (id/url/title/createtime set).
        :param href: article URL (same as ``item['url']``; kept for the
                     original call signature).
        """
        time.sleep(1)  # throttle between article downloads
        response = self.hrefget(item['url'])
        # ROBUSTNESS: the original dereferenced .text without checking for the
        # error-dict sentinel, raising AttributeError after exhausted retries.
        if isinstance(response, dict):
            return
        HTML = etree.HTML(response.text)

        contents = []
        sections = HTML.xpath(
            '//ul[@class="neirong2"]/section|//ul[@class="neirong2"]/p'
            '|//ul[@class="neirong2"]/span|//ul[@class="neirong2"]/div')
        for section in sections:
            contents.append(''.join(section.xpath('.//text()')))

        # Fallback: some articles keep all text in bare <div>s, yielding only
        # empty/short fragments above — re-extract from the divs directly.
        if (len(contents) == 3 and contents[1] == contents[2] == '') \
                or (len(contents) == 1 and len(contents[0]) < 100):
            for div in HTML.xpath('//ul[@class="neirong2"]/div'):
                contents.append(''.join(div.xpath('.//text()')))

        content = re.sub(r'\n{2,}', '', con_clean(contents))
        # Strip the trailing "previous article" navigation fragment.
        item['content'] = content.replace('\n\r\n上一篇:', '').replace(r'\n\r\n上一篇:', '')
        self.mysql.insert_item(item)

    def hrefget(self, url, degree=5):
        """GET *url* with a random User-Agent, retrying on failure.

        :param url: URL to fetch.
        :param degree: remaining retry budget (``degree`` retries after the
                       first attempt).
        :return: the ``requests.Response`` on HTTP 200, otherwise a dict
                 ``{'错误代码': <status code>}`` once retries are exhausted.
        :raises requests.RequestException: if every attempt raised a
                 network-level error.
        """
        headers = {
            'User-Agent': random_ua()
        }
        try:
            response = requests.get(
                url=url,
                headers=headers,
                timeout=100
            )
        except requests.RequestException:
            # ROBUSTNESS: the original did not retry network-level errors
            # (timeouts, connection resets) at all.
            if degree > 0:
                return self.hrefget(url, degree - 1)
            raise
        # Let requests guess the real encoding (site mixes gbk/utf-8).
        response.encoding = response.apparent_encoding

        if response.status_code == 200:
            return response
        if degree > 0:
            # BUGFIX: the original used `degree >= 0`, producing one retry
            # more than the budget implies (7 requests for degree=5).
            return self.hrefget(url, degree - 1)
        # Error sentinel checked by callers via isinstance(..., dict).
        return {'错误代码': response.status_code}





