# -*- coding: UTF-8 -*-
# Project : biobase_python
# File : instrument.py
# IDE : PyCharm
# Author : 博科（鑫贝西）田聪
# Date : 2021/10/23 8:54
# PS : 化工仪器网
# URL : https://www.chem17.com/
import datetime
import re
import time
import random
import logging

import requests
from urllib.parse import unquote
from lxml import etree
from urllib import parse
from hashlib import sha1
from concurrent.futures import ThreadPoolExecutor

from tools.settings import *
from tools.time_corr import timec
from tools.data_clean import con_clean, title_clena
from tools.Img_save import saveImg
from tools.tomysql import MYSQL

# Crawler log: appended (not truncated) across runs, INFO level, with
# timestamp / logger name / level / message on each record.
logging.basicConfig(filename="cham17.log", filemode="a", format="%(asctime)s-%(name)s-%(levelname)s-%(message)s",
                    level=logging.INFO)


class chemSpider:
    """Crawler for technology news articles on chem17.com (化工仪器网).

    Workflow: start_request -> parse (keyword listing pages, paginated)
    -> parse_content (article detail: text + images) -> MySQL insert.
    """

    # Titles containing any of these words are tender/procurement/notice
    # posts rather than product news, and are skipped on the listing page.
    _SKIP_WORDS = ('招标', '投标', '采购', '购买', '修订', '培训', '中标', '热搜', '购置', '名单')

    def __init__(self):
        # Pool of User-Agent strings (from settings) rotated per request.
        self.useragent = USERLIST
        # Helper that normalises the site's date strings (see tools.time_corr).
        self.ttc = timec()

    def start_request(self):
        """Entry point: build one listing URL per configured keyword and crawl."""
        urls = [f'https://www.chem17.com/tech_news/t0/list_p1_k{i}.html' for i in KEYS]

        # NOTE(review): the threaded crawl over all keyword URLs is disabled;
        # only the single hard-coded debug keyword below is crawled.
        # Re-enable the executor (and drop the hard-coded URL) for a full run.
        # ts = ThreadPoolExecutor(max_workers=MAX_WORKERS)
        # ts.map(self.parse, urls)
        # ts.shutdown(wait=True)
        self.parse('https://www.chem17.com/tech_news/t0/list_p1_k洁净工作台.html')

    def parse(self, url):
        """Crawl a keyword listing page and every page that follows it.

        Pagination is handled iteratively; the original implementation
        recursed once per page, risking RecursionError on long listings.
        """
        while url:
            response = self.hrefget(url)
            # hrefget returns a dict on unrecoverable failure.
            if isinstance(response, dict):
                return

            _res = response.text
            # The keyword is URL-encoded in the path; decode it for the item.
            # Raw string + escaped dot: the original pattern's non-raw '\d'
            # and bare '.' were latent bugs.
            key = unquote(re.findall(r'list_p\d*_k(.+?)\.html', url)[0])

            HTML = etree.HTML(_res)
            dt_dd = []
            for dl in HTML.xpath('//div[@class="mainLeftList"]/dl'):
                # <dt> holds title + link; the sibling <dd> date has no year
                # and is therefore ignored.
                dt_dd += dl.xpath('./dt')

            for dt in dt_dd:
                title = title_clena(''.join(dt.xpath('./a/text()')))
                # Skip tender / procurement / training style announcements.
                if any(word in title for word in self._SKIP_WORDS):
                    continue

                # \u200b: strip zero-width spaces the site embeds in titles.
                item = {'id': '', 'key': key, 'title': title.replace('\u200b', '')}

                hrefs = dt.xpath('./a/@href')
                # Guard the [0] index and keep only real article links.
                if not hrefs or '/tech_news/' not in hrefs[0]:
                    continue
                item['url'] = hrefs[0]

                # Stable article id: SHA-1 of the article URL.
                sha = sha1()
                sha.update(item['url'].encode())
                item['id'] = sha.hexdigest()
                item['Cover_photo'] = []

                self.parse_content(item)
                time.sleep(1)  # be polite to the server

            # "Next page" link; '#' marks the last page. Guard against the
            # pager element being absent entirely.
            nexts = HTML.xpath('//div[@class="newspages"]/a[@class="lt"]/@href')
            if nexts and nexts[0] != '#':
                url = parse.urljoin(url, nexts[0])
            else:
                url = None

    # 详情页 (article detail page)
    def parse_content(self, item):
        """Fetch one article page, extract text and images, store if relevant.

        Only articles younger than 720 days whose body mentions at least one
        configured keyword are persisted.
        """
        response = self.hrefget(item['url'])
        if isinstance(response, dict):
            return

        # Normalise every <br> variant to a newline before parsing.
        _res = re.sub('<br.*?>', '\r\n', response.text)
        HTML = etree.HTML(_res)

        times = HTML.xpath('//div[@class="newsTime"]/dl/dt/text()')
        if not times:
            # Layout changed or article removed — nothing to extract.
            return
        createtime = times[0].split('  ')[0].strip()
        day, temp = self.ttc.zwt(createtime)
        if day >= 720:
            # Older than ~2 years: not worth storing.
            return

        item['createtime'] = temp
        ps = HTML.xpath('//div[@id="newsContent"]/h3|//div[@id="newsContent"]/h2|'
                        '//div[@id="newsContent"]/p|//div[@id="newsContent"]/div')
        contents = [''.join(p.xpath('.//text()')) for p in ps]
        content = re.sub(r'\n{2,}', '', con_clean(contents))
        item['content'] = content

        item['Cover_photo'] += HTML.xpath('//div[@id="newsContent"]//img/@src')

        # Persist only when the body mentions at least one configured keyword.
        if not any(k in content for k in KEYS):
            return

        item['Cover_photo'] += HTML.xpath('//div[@class="mt20 f16 lh28 newsdetail"]//img/@src')
        mysql = MYSQL()
        ID = item['id']
        if mysql.find(ID):
            # Row already present — skip.
            print(f'####{ID}#######数据已存在')
            return

        # Download images under today's dated folder and rewrite their paths
        # relative to the web root.
        newdata = datetime.datetime.now().strftime('%Y%m%d')
        saveaddr = r'/www/wwwroot/daohang/public/uploads/{}'.format(newdata)
        images = [saveImg(img, saveaddr).replace(r'/www/wwwroot/daohang/public', '')
                  for img in item['Cover_photo']]
        item['images'] = ','.join(images)

        # SECURITY: scraped (untrusted) values are interpolated into SQL.
        # Doubling single quotes is a minimal guard; a parameterised query
        # via the MYSQL helper would be the proper fix.
        def esc(v):
            return str(v).replace("'", "''")

        sql = ("INSERT INTO fa_article(id,title,content,images,createtime,url) "
               f"VALUES('{esc(item['id'])}','{esc(item['title'])}','{esc(item['content'])}',"
               f"'{esc(item['images'])}','{esc(item['createtime'])}','{esc(item['url'])}')")
        print(sql)
        # mysql.sql_write(sql, ID)

    def hrefget(self, url, degree=8):
        """GET *url* with retries and a rotating User-Agent.

        Returns the requests.Response on HTTP 200; otherwise a dict
        describing the failure (callers detect failure via isinstance dict).
        ``degree`` preserves the original retry budget of degree + 2 attempts.
        The original recursed per retry; this loops instead.
        """
        last_error = None
        last_status = None
        for _ in range(max(degree + 2, 1)):
            headers = {
                # Fresh random UA each attempt, as the recursive original did.
                'User-Agent': random.choice(self.useragent),
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                'Accept-Encoding': 'gzip, deflate, br',
            }
            try:
                response = requests.get(
                    url=url,
                    headers=headers,
                    # Original used timeout=1000s, which effectively never
                    # fires; 60s per attempt is still generous.
                    timeout=60,
                )
                # Let requests guess the real encoding (site mixes charsets).
                response.encoding = response.apparent_encoding
            except Exception as e:
                last_error = e
                continue
            if response.status_code == 200:
                return response
            last_status = response.status_code
            last_error = None
        if last_error is not None:
            return {'超过重试次数': f"{last_error}"}
        return {'错误代码': last_status}


if __name__ == '__main__':

    # Run forever: one full crawl pass, then sleep before the next pass.
    while True:
        try:
            ins = chemSpider()
            ins.start_request()
        except Exception as e:
            # Top-level boundary: logging.exception records the traceback
            # (logging.error(e) only recorded str(e)).
            logging.exception(e)
            print(e)
        logging.info("休眠6000秒")
        time.sleep(6000)