# -*- coding: UTF-8 -*-
# Project : biobase_python
# File : laibao.py
# IDE : PyCharm
# Author : 博科（鑫贝西）田聪
# Date : 2021/11/1 14:56

import jieba
import datetime
import re
import time
import random
import logging
import urllib.parse

import requests
from urllib.parse import unquote
from lxml import etree
from urllib import parse
from hashlib import sha1
from concurrent.futures import ThreadPoolExecutor

from tools.settings import *
from tools.time_corr import timec
from tools.data_clean import con_clean, title_clena
from tools.Img_save import saveImg
from tools.tomysql import MYSQL


class laiBaoSpider:
    """Crawler for the labbase.net news section.

    Walks the paginated news list, filters out procurement/administrative
    notices and stale articles, scores each detail page against the KEYS
    keyword list using jieba segmentation, and stores matching articles
    (plus their image URLs) through the project's MYSQL helper.
    """

    # Titles containing any of these words are tenders / purchases /
    # administrative notices rather than news, and are skipped.
    EXCLUDE_WORDS = ('招标', '投标', '采购', '购买', '修订',
                     '培训', '中标', '热搜', '购置', '名单')

    # Articles older than this many days (~2 years) are ignored.
    MAX_AGE_DAYS = 700

    def __init__(self):
        # Register the domain keywords with jieba BEFORE crawling, so that
        # parse_content's segmentation recognizes them.  (The original
        # kicked off the crawl first, which meant every article was
        # segmented without the custom dictionary.)
        for keyword in KEYS:
            jieba.add_word(keyword)
        self.start_request()

    def start_request(self):
        """Fetch the first news-list page and hand it to parse()."""
        url = r'http://www.labbase.net/News/AllNewsList-1-1.html'
        self.parse(self.htmlGet(url))

    @staticmethod
    def _title_excluded(title):
        """Return True when *title* contains any exclusion keyword.

        Fixes the original condition, which chained ``not in`` tests with
        ``or`` and therefore only excluded titles containing ALL of the
        keywords (i.e. it excluded nothing in practice).
        """
        return any(word in title for word in laiBaoSpider.EXCLUDE_WORDS)

    def parse(self, response):
        """Parse list pages, following pagination iteratively.

        The original recursed into the next page (RecursionError risk on a
        long history) and slept only after the recursive call returned, so
        the delay never actually throttled requests.
        """
        while response is not None:
            HTML = etree.HTML(response.text)
            dts = HTML.xpath('//div[@class="mainLeftList"]//dt')
            dds = HTML.xpath('//div[@class="mainLeftList"]//dd')
            for dt, dd in zip(dts, dds):
                # Detail-page title and absolute link.
                detailPageTitle = dt.xpath('./a[2]/text()')[0]
                detailsPageLink = urllib.parse.urljoin(
                    response.url, dt.xpath('./a[2]/@href')[0])

                # Days since publication, and the publication timestamp.
                interval, pubdate = timec().zwt(
                    dd.xpath('./div[@class="ddleft"]/text()')[0])

                # Skip stale articles, excluded titles, and non-news links.
                if (interval >= self.MAX_AGE_DAYS
                        or self._title_excluded(detailPageTitle)
                        or '/News/' not in detailsPageLink):
                    continue

                # Stable record id: SHA-1 of the article URL.
                sha = sha1()
                sha.update(detailsPageLink.encode())
                item = {
                    'id': sha.hexdigest(),
                    'title': detailPageTitle,
                    'url': detailsPageLink,
                    'createtime': pubdate,
                    'Cover_photo': [],
                }

                _response = self.htmlGet(url=detailsPageLink)
                if _response is None:
                    # Detail fetch failed even after retries; skip it
                    # instead of crashing on None.meta.
                    continue
                _response.meta = {'item': item}
                self.parse_content(response=_response)

            # Pagination: while more pages exist the last link reads '下一页»'.
            tail = HTML.xpath('//div[@class="digg"]/a[last()]/text()')
            if tail and tail[0] == '下一页»':
                time.sleep(2)  # be polite between list pages
                next_url = parse.urljoin(
                    response.url,
                    HTML.xpath('//div[@class="digg"]/a[last()]/@href')[0])
                response = self.htmlGet(next_url)
            else:
                break

    def parse_content(self, response):
        """Parse a detail page, score it against KEYS, and store it.

        Only articles whose segmented text yields more than 3 keyword hits
        are kept; the most frequent keyword is prefixed to the title.
        """
        item = response.meta['item']
        # Turn <br> tags into line breaks so paragraph text keeps its layout.
        _res = re.sub('<br.*?>', '\r\n', response.text)
        HTML = etree.HTML(_res)

        # Paragraph text lives under several layouts; collect them all.
        ps = HTML.xpath('//div[@id="newsContent"]/div/p|//div[@id="newsContent"]/div/div/p/span|//div[@id="newsContent"]/div/div/span|//div[@id="newsContent"]/div/div/strong/span')
        contents = con_clean([''.join(p.xpath('.//text()')) for p in ps])

        # Count occurrences of every domain keyword in the segmented text.
        hits = dict.fromkeys(KEYS, 0)
        for token in jieba.lcut(contents):
            if token in hits:
                hits[token] += 1

        # Most frequent keyword (empty string when there are no hits).
        maxtag = ''
        maxflag = 0
        for key, count in hits.items():
            if count > maxflag:
                maxtag, maxflag = key, count

        if sum(hits.values()) > 3:
            item['content'] = contents
            item['title'] = f'{maxtag}_{item["title"]}'

            # Image URLs, minus ad / comment-widget / self-hosted ones.
            imgUrls = [i for i in HTML.xpath('//div[@id="newsContent"]//img/@src') if 'kuaizhan' not in i and 'changyan.sohu.com' not in i and 'labbase' not in i]
            item['Cover_photo'] = imgUrls
            item['Cover_photo'] += HTML.xpath('//div[@class="mt20 f16 lh28 newsdetail"]//img/@src')

            mysql = MYSQL()
            ID = item['id']
            if not mysql.find(ID):  # only insert unseen articles
                images = []
                newdata = datetime.datetime.now().strftime('%Y%m%d')
                saveaddr = r'/www/wwwroot/daohang/public/uploads/{}'.format(newdata)
                for img in item['Cover_photo']:
                    # Image download currently disabled:
                    # images.append(saveImg(img, saveaddr).replace(r'/www/wwwroot/daohang/public', ''))
                    pass
                item['images'] = ','.join(images)
                # SECURITY NOTE: string-built SQL is injectable via scraped
                # titles/content — switch to a parameterized query before
                # re-enabling mysql.sql_write below.
                sql = f"INSERT INTO fa_article(id,title,content,images,createtime,url) VALUES('{item['id']}','{item['title']}','{item['content']}','{item['images']}','{item['createtime']}','{item['url']}')"
                print(sql)
                # mysql.sql_write(sql, ID)
            else:
                print(f'####{ID}#######数据已存在')

    def htmlGet(self, url, degree=8):
        """GET *url* with a random User-Agent, retrying on failure.

        Retries up to *degree* additional times on exceptions or non-200
        status codes.  Returns the requests.Response on HTTP 200, or None
        once every attempt has failed.  (The original returned an error
        dict on one exhausted-retry path, which crashed callers that
        expect a Response.)
        """
        headers = {
            'User-Agent': random.choice(USERLIST),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Accept-Encoding': 'gzip, deflate, br',
        }
        try:
            response = requests.get(
                url=url,
                headers=headers,
                timeout=1000,
            )
            # Let requests guess the real encoding (site mixes charsets).
            response.encoding = response.apparent_encoding
        except Exception as e:
            if degree >= 0:
                return self.htmlGet(url, degree - 1)
            logging.error('retries exhausted for %s: %s', url, e)
            return None

        if response.status_code == 200:
            return response
        if degree >= 0:
            return self.htmlGet(url, degree - 1)
        return None


if __name__ == '__main__':
    # Entry point: constructing the spider starts the full crawl.
    spider = laiBaoSpider()
