# -*- coding: UTF-8 -*-
# Project : biobase_python
# File : instrument.py
# IDE : PyCharm
# Author : 博科（鑫贝西）田聪
# Date : 2021/10/23 8:54
# PS : Web crawler for news data on instrument.com.cn
# URL : https://www.instrument.com.cn/

from tools import toMysql
import datetime
import re
import time
import random
import logging

import requests

from lxml import etree
from lxml.html import tostring
from hashlib import sha1

class insSpider:
    """Crawler for instrument.com.cn news articles that match the KEYS keywords.

    Flow: start_request() builds one search URL per keyword -> parse() walks
    the result pages -> parse_content() fetches each article, filters by
    keyword, downloads images and inserts the record into MySQL.

    NOTE(review): several module-level names used here are NOT defined in this
    file -- KEYS (keyword list), MYSQL (DB wrapper), con_clean (content
    cleaner) and saveImg (image downloader). They are presumably meant to come
    from the `tools` module; confirm before running.
    """

    # Pool of User-Agent strings rotated per request.
    # BUGFIX: the original referenced self.useragent without ever defining it,
    # so every request raised AttributeError; a minimal default is supplied.
    useragent = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36',
    ]

    # Article titles containing any of these words are skipped (tenders,
    # procurement notices, training announcements, ...).
    _SKIP_WORDS = ('招标', '投标', '采购', '购买', '修订',
                   '培训', '中标', '热搜', '购置', '名单')

    def start_request(self):
        """Kick off one search crawl (page 1) per keyword in KEYS."""
        urls = [
            r'https://search.instrument.com.cn/w/search?act=news&keywords={}'
            r'&sort=1&pageIndex=1&loginSource=1'.format(key)
            for key in KEYS
        ]
        for url in urls:
            self.parse(url)

    def parse(self, url):
        """Parse one search-result page and recurse into the next page.

        :param url: search URL containing ``&keywords=...&`` and
                    ``&pageIndex=N&`` query parameters.
        """
        response = self.hrefget(url)
        if isinstance(response, dict):  # hrefget gave up after its retries
            return
        _res = response.text
        # Recover the keyword this search belongs to from the URL itself.
        key = re.findall('&keywords=(.+?)&', url)[0]

        # "No results for this search term" page -> nothing to do.
        if '您输入的搜索词在当前栏目无搜索结果' in _res:
            return

        html = etree.HTML(_res)
        for ul in html.xpath('//div[@class="clearfix mb10"]'):
            item = {'id': '', 'key': key}

            title = ''.join(ul.xpath('./p/a/text()'))
            if any(word in title for word in self._SKIP_WORDS):
                continue
            item['title'] = key + '___' + title

            href = ul.xpath('./p/a/@href')[0]
            item['url'] = href
            if '/news/' not in href:  # only follow real news articles
                continue

            # The SHA-1 of the article URL is the record's primary key.
            sha = sha1()
            sha.update(item['url'].encode())
            item['id'] = sha.hexdigest()

            # BUGFIX: the original immediately overwrote this value with the
            # undefined name `temp` (NameError on every item); keep the
            # timestamp scraped from the result page instead.
            item['createtime'] = ul.xpath('.//em[@class="mr20 fl"]/text()')[0]

            # Cover image(s) from the result listing.
            item['Cover_photo'] = ul.xpath('./div/div/a/img/@src')

            # Follow into the article detail page.
            self.parse_content(item, href)

            # Polite delay to avoid anti-crawler measures.
            time.sleep(0.5)

        # Recurse into the next result page, if the site offers one.
        if '下一页' in _res:
            page = int(re.findall(r'&pageIndex=(\d+)&', url)[0])
            nexturl = re.sub(r'&pageIndex=(\d+)&', f'&pageIndex={page + 1}&', url)
            self.parse(nexturl)

    def parse_content(self, item, href):
        """Fetch an article page, extract its text/images and store the record.

        :param item: partially filled record built by parse().
        :param href: article detail-page URL.
        """
        response = self.hrefget(href)
        # BUGFIX: hrefget returns an error dict after exhausting its retries;
        # the original crashed on response.text in that case.
        if isinstance(response, dict):
            return
        HTML = etree.HTML(response.text)

        # Content extraction, in order of preference: <p>, then <div>,
        # then <section> children of the article container.
        ps = HTML.xpath('//div[@class="mt20 f16 lh28 newsdetail"]/p')
        contents = [''.join(p.xpath('.//text()')) for p in ps]
        if len(contents) == 1 and len(contents[0]) > 100:
            # One huge <p>: split it back into paragraphs on its own markup.
            ct = tostring(ps[0], encoding="utf-8").decode("utf-8")
            contents = ct.replace('<p>', '\r\n').replace('</p>', '\r\n').split('<br>')
        else:
            if len(contents) < 2:
                div = HTML.xpath('//div[@class="mt20 f16 lh28 newsdetail"]/div')
                contents = [''.join(p.xpath('.//text()')) for p in div]
            if len(contents) < 3:
                section = HTML.xpath('//div[@class="mt20 f16 lh28 newsdetail"]/section')
                contents = [''.join(p.xpath('.//text()')) for p in section]

        content = con_clean(contents)
        if not content:
            return
        item['content'] = content

        # Keep only articles whose body mentions at least one keyword.
        if not any(k in content for k in KEYS):
            # BUGFIX: the original printed the undefined name `ID` here
            # (NameError); use the item's own id.
            print(f"####{item['id']}#######不包含关键字")
            return

        # Collect in-article images in addition to the cover photo.
        item['Cover_photo'] += HTML.xpath('//div[@class="mt20 f16 lh28 newsdetail"]//img/@src')

        mysql = MYSQL()
        ID = item['id']
        if mysql.find(ID):  # already stored
            print(f'####{ID}#######数据已存在')
            return

        # New record: download the images, then insert.
        today = datetime.datetime.now().strftime('%Y%m%d')
        saveaddr = r'/www/wwwroot/daohang/public/uploads/{}'.format(today)
        images = [
            saveImg(img, saveaddr).replace(r'/www/wwwroot/daohang/public', '')
            for img in item['Cover_photo']
        ]
        item['images'] = ','.join(images)

        # SECURITY: values are interpolated into the SQL string; escape single
        # quotes so scraped titles/content cannot break or inject into the
        # statement. Prefer parameterized queries if the MYSQL wrapper
        # supports them -- TODO confirm its API.
        safe = {k: str(v).replace("'", "''") for k, v in item.items()}
        sql = (
            "INSERT INTO fa_article(id,title,content,images,createtime,url) "
            f"VALUES('{safe['id']}','{safe['title']}','{safe['content']}',"
            f"'{safe['images']}','{safe['createtime']}','{safe['url']}')"
        )
        mysql.sql_write(sql, ID)

    def hrefget(self, url, degree=5):
        """GET a URL with a random User-Agent, retrying on failure.

        :param url: URL to fetch.
        :param degree: remaining retry budget; decremented on each retry.
        :return: a requests.Response on HTTP 200, otherwise a dict
                 ``{'错误代码': ...}`` once the retries are exhausted
                 (callers detect failure by checking for a dict).
        """
        headers = {'User-Agent': random.choice(self.useragent)}
        try:
            # ROBUSTNESS: the original had no timeout and could hang forever;
            # network errors now consume a retry instead of crashing the crawl.
            response = requests.get(url=url, headers=headers, timeout=30)
        except requests.RequestException as e:
            if degree >= 0:
                return self.hrefget(url, degree - 1)
            return {'错误代码': repr(e)}
        response.encoding = response.apparent_encoding

        if response.status_code == 200:
            return response
        if degree >= 0:
            return self.hrefget(url, degree - 1)
        return {'错误代码': response.status_code}


if __name__ == '__main__':

    # BUGFIX: the root logger defaults to WARNING, so the original
    # logging.info("休眠6000秒") call was never shown; configure logging once.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s',
    )

    # Run one full crawl, log the window it took, then sleep and repeat.
    while True:
        try:
            oldtime = datetime.datetime.now()
            ins = insSpider()
            ins.start_request()
            newtime = datetime.datetime.now()
            print(f"{oldtime}开始###########{newtime}结束")
        except Exception:
            # Keep the loop alive on any failure; logging.exception records
            # the full traceback (the original logging.error(e) lost it).
            logging.exception("crawl iteration failed")
        logging.info("休眠6000秒")
        time.sleep(6000)
