# -*- coding: utf-8 -*-
# @Time: 2023/3/15 09:35
# @Author: Cwoner
# @Organization: CTIM
# @Software: PyCharm


import requests
from lxml import etree
import hashlib
from setting import SAVEPATH
import os
import re
import time

from tools.serverAPI import upload_file
from tools.log_system import local_log


class TongLingRBao():
    """Scraper for the Tongling Daily (铜陵日报) e-paper.

    For a given publication date it walks the day's front page, resolves
    each page's PDF link, downloads the PDF under ``SAVEPATH/<mid>/<date>/``
    and forwards it to ``upload_file``.  Failures are reported through
    ``local_log``.
    """

    def __init__(self, ccdate, id=''):
        """
        :param ccdate: publication date as ``YYYYMMDD``, e.g. ``'20230101'``
        :param id: task identifier forwarded to ``local_log`` (optional)
        """
        # Front page of the edition: layout/YYYYMM/DD/node_A01.html
        self.url = f'http://szb.tlnews.cn/tlrb/tlrb/pc/layout/{ccdate[:6]}/{ccdate[6:8]}/node_A01.html'
        self.ccdate = ccdate
        self.id = id
        self.name = '铜陵日报'
        print('初始化：', self.name, self.url)
        # Stable media id derived from the paper name; doubles as the save directory.
        self.mid = hashlib.md5((self.name + '_baozi').encode()).hexdigest()
        # makedirs(exist_ok=True) replaces the isdir()/mkdir() pair: no
        # check-then-create race and missing parent directories are created too.
        os.makedirs(SAVEPATH + self.mid, exist_ok=True)

    def index(self, c=0):
        """Fetch the front page and map page titles to page URLs.

        :param c: current retry count (internal; network errors retry up to 3 extra times)
        :return: ``{title: page_url}`` dict, or ``None`` when the day has no
                 paper / all attempts failed.
        """
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Referer": "https://www.yunzhan365.com/homepage/chxf/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
            "sec-ch-ua": "\"Chromium\";v=\"110\", \"Not A(Brand\";v=\"24\", \"Google Chrome\";v=\"110\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\""
        }
        try:
            response = requests.get(self.url, headers=headers, timeout=10)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed by the retry loop.
        except requests.RequestException:
            if c > 3:
                local_log(self.id, '网络错误超3次，退出~', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'报纸地址：{self.url}', media_name=self.name)
                return None
            return self.index(c + 1)
        if response.status_code != 200:
            local_log(self.id, '当日没有报纸？访问失败超3次，退出~', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'报纸地址：{self.url}', f'响应状态码：{response.status_code}', media_name=self.name)
            print(response.status_code)
            return None
        html = etree.HTML(response.content.decode('utf-8'))
        result = {}
        for bookItem in html.xpath('//li[@class="posRelative"]/a'):
            titles = bookItem.xpath('./text()')
            hrefs = bookItem.xpath('./@href')
            # Skip malformed entries instead of raising IndexError on `[0]`
            # when the site layout changes.
            if not titles or not hrefs:
                continue
            result[titles[0]] = f'http://szb.tlnews.cn/tlrb/tlrb/pc/layout/{self.ccdate[:6]}/{self.ccdate[6:8]}/' + hrefs[0]
        return result

    def get_pdf(self, url, c=1):
        """Resolve a page URL to the absolute URL of its PDF.

        :param url: page URL produced by :meth:`index`
        :param c: current retry count (internal)
        :return: PDF URL string, or ``None`` on failure.
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
        }
        try:
            response = requests.get(url, headers=headers, verify=False, timeout=10)
        except requests.RequestException:
            if c > 3:
                local_log(self.id, '目标网站链接失败超3次，退出~', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
                return None
            return self.get_pdf(url, c + 1)
        if response.status_code != 200:
            local_log(self.id, '版面访问失败超3次，退出~',
                      time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'版面地址：{url}',
                      f'响应状态码：{response.status_code}', media_name=self.name)
            print(response.status_code)
            return None
        html = etree.HTML(response.text)
        hrefs = html.xpath('//div[@class="pull-right"]/a/@href')
        # No download link on the page: report and bail out instead of IndexError.
        if not hrefs:
            local_log(self.id, '版面访问失败超3次，退出~',
                      time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'版面地址：{url}',
                      media_name=self.name)
            return None
        # The href is relative ('../../../...'); strip the prefix to make it absolute.
        return ('http://szb.tlnews.cn/tlrb/tlrb/pc/' + hrefs[0]).replace('../../../', '')

    def __download(self, file, url, title, tid, c=0):
        """Download one PDF, save it locally and hand it to ``upload_file``.

        :param file: target file name (``<tid>.pdf``)
        :param url: absolute PDF URL
        :param title: page title (forwarded to the upload metadata)
        :param tid: page id (md5 of name+url+date)
        :param c: current retry count (internal)
        :return: ``True`` on success, falsy on failure.
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
        }
        try:
            response = requests.get(url, headers=headers, verify=False, timeout=10)
        except requests.RequestException:
            if c > 3:
                local_log(self.id, '下载图片失败超3次，退出~',
                          time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'图片地址：{url}',
                          media_name=self.name)
                return False
            return self.__download(file, url, title, tid, c + 1)
        if response.status_code != 200:
            local_log(self.id, f'下载图片失败，响应状态码：{response.status_code}，跳过~',
                      time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'图片地址：{url}', media_name=self.name)
            return False
        path = SAVEPATH + self.mid + '/' + self.ccdate + '/'
        os.makedirs(path, exist_ok=True)
        with open(path + file, 'wb') as f:
            f.write(response.content)
        data = {
            'entity_id': self.mid,
            'title': title,
            'tid': tid,
            'file_name': file,
            'origin_url': url,
            'ndate': self.ccdate[:4] + '-' + self.ccdate[4:6] + '-' + self.ccdate[6:8]
        }
        upload_file(data, response.content)
        return True

    def dispatcher(self):
        """Scrape the whole day's edition.

        On a page failure the day is retried from scratch, but at most
        3 times — the previous ``while True`` loop could spin forever
        when one download kept failing.
        """
        for _attempt in range(3):
            pages = self.index()
            if not pages:
                print(f'{self.ccdate}无报纸！')
                return
            day_ok = True
            for title, url in pages.items():
                pdf_url = self.get_pdf(url)
                print('找到：', title, pdf_url)
                # get_pdf may return None; previously that crashed the md5
                # concatenation below with a TypeError.
                if pdf_url is None:
                    day_ok = False
                    break
                tid = hashlib.md5((self.name + pdf_url + self.ccdate).encode()).hexdigest()
                if not self.__download(tid + '.pdf', pdf_url, title, tid):
                    day_ok = False
                    break
            if day_ok:
                return

    def run(self):
        """Public entry point: scrape the configured date."""
        self.dispatcher()





if __name__ == '__main__':
    # Manual smoke run against a fixed publication date (YYYYMMDD).
    target_date = '20231027'
    scraper = TongLingRBao(target_date)
    scraper.run()



