# -*-coding:utf-8 -*-
# @Time: 2023/2/20 0020 下午 14:38
# @Author: Cwoner
# @Organization: CTIM
# @Software: PyCharm


import requests
from lxml import etree
import hashlib
from setting import SAVEPATH
import os
import re

from tools.serverAPI import upload_file

class HaErBinRBao():
    """Scraper for one issue of the Harbin Daily (哈尔滨日报) e-paper.

    Starting from the issue's landing URL, walks the bottom page
    navigation page by page, downloads each page's PDF to
    ``SAVEPATH/<mid>/<ccdate>/`` and pushes it to the remote store via
    ``upload_file``.
    """

    def __init__(self, ccdate, id=''):
        """
        :param ccdate: issue date as ``YYYYMMDD``, e.g. ``'20230101'``
        :param id: optional external id; kept for caller compatibility
        """
        # Landing URL of the issue; per-page hrefs in the navigation bar
        # are relative to this directory.
        self.url = f'http://harbin.joyhua.cn/hebrb/{ccdate}/html/'
        self.ccdate = ccdate
        self.id = id
        self.name = '哈尔滨日报'
        print('初始化：', self.name, self.url)
        # Stable entity id derived from the paper's name.
        self.mid = hashlib.md5((self.name + '_baozi').encode()).hexdigest()
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # original isdir() + mkdir() pair.
        os.makedirs(SAVEPATH + self.mid, exist_ok=True)

    def page(self, url):
        """Fetch one e-paper page, download its PDF, return the next page URL.

        :param url: absolute URL of the page to process
        :return: absolute URL of the next page, ``False`` when this is the
                 last page (or the navigation layout is unexpected), or
                 ``None`` when the HTTP request fails — all falsy values
                 stop the loop in :meth:`run`.
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Referer": "http://harbin.joyhua.cn/hebrb/20230220/html/page_07.htm",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
        }
        # timeout added so a stalled server cannot hang the scraper forever;
        # verify=False kept from the original (site served over plain HTTP).
        response = requests.get(url, headers=headers, verify=False, timeout=30)
        if response.status_code != 200:
            return None  # falsy: run() stops paging

        html = etree.HTML(response.content.decode('utf-8'))
        # Bottom navigation holds 4 links (first/prev/next/last) on a
        # normal page; anything else means there is nothing to follow.
        nav_links = html.xpath('//div[@class="page_bottom"]/a')
        next_page = False
        if len(nav_links) == 4:
            next_href = nav_links[2].xpath('./@href')[0]
            next_page = self.url + next_href
            print('下一页', next_page)
            # When "next" equals "last" we are already on the final page.
            if next_href == nav_links[3].xpath('./@href')[0]:
                next_page = False

        # The b_bot box carries the page title text and the PDF link.
        b_bot = html.xpath('//div[@class="b_bot"]')[0]
        # Raw string for the regex: '\s' is an invalid escape in a plain
        # string literal (SyntaxWarning on Python >= 3.12).
        title = re.sub(r'\s', '', b_bot.xpath('string(.)'))
        if not title:
            title = f'未知标题:{url}'
        # Guard the real failure mode: a page without a PDF link.  The
        # original checked `if pdf_url:` which was always true because the
        # URL was built by prefixing a constant host string.
        pdf_hrefs = b_bot.xpath('./a/@href')
        if pdf_hrefs:
            pdf_url = 'http://harbin.joyhua.cn' + pdf_hrefs[0]
            # tid is deterministic per (paper, title, date) so re-runs
            # overwrite the same file instead of duplicating it.
            tid = hashlib.md5((self.name + title + self.ccdate).encode()).hexdigest()
            file = tid + '.pdf'
            print(title, pdf_url)
            self.__download(file, pdf_url, tid, title)
        return next_page

    def __download(self, file, url, tid, title):
        """Download one page PDF, save it under SAVEPATH and upload it.

        :param file: local file name (``<tid>.pdf``)
        :param url: absolute URL of the PDF
        :param tid: md5 id of this page (name+title+date)
        :param title: cleaned page title used as upload metadata
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
        }
        # Longer timeout than page(): PDFs are larger than HTML pages.
        response = requests.get(url, headers=headers, verify=False, timeout=60)
        if response.status_code != 200:
            return  # best-effort: skip this page, keep crawling
        path = SAVEPATH + self.mid + '/' + self.ccdate + '/'
        os.makedirs(path, exist_ok=True)
        with open(path + file, 'wb') as f:
            f.write(response.content)
        data = {
            'entity_id': self.mid,
            'title': title,
            'tid': tid,
            'file_name': file,
            'origin_url': url,
            # YYYYMMDD -> YYYY-MM-DD
            'ndate': self.ccdate[:4] + '-' + self.ccdate[4:6] + '-' + self.ccdate[6:]
        }
        upload_file(data, response.content)

    def run(self):
        """Walk the issue from its landing page until no next page remains."""
        next_url = self.url  # renamed: do not shadow the builtin `next`
        while next_url:
            next_url = self.page(next_url)






if __name__ == '__main__':
    # Scrape a single issue, identified by its YYYYMMDD date string.
    issue_date = '20230106'
    scraper = HaErBinRBao(issue_date)
    scraper.run()



