# -*-coding:utf-8 -*-
# @Time: 2023/2/20 0020 下午 14:33
# @Author: Cwoner
# @Organization: CTIM
# @Software: PyCharm
import json

import requests
from lxml import etree
import hashlib
from setting import SAVEPATH
import os
import re
import time
from tools.serverAPI import upload_file
import urllib.parse
from tools.log_system import local_log

class MuDanJiangRBao():
    """Scraper for the MuDanJiang Daily (牡丹江日报) digital newspaper.

    Pipeline: locate the day's WeChat article through a monitoring API,
    extract the embedded mini-program flip-book link, resolve the book's
    config.js to get the page-image list, then download every page image
    and push it to the archive via ``upload_file``.
    """

    def __init__(self, ccdate, id=''):
        """
        :param ccdate: publication date as 'YYYYMMDD', e.g. '20230101'.
        :param id: optional task id, used only for log correlation.
        """
        self.url = f'http://xwb.joyhua.cn/xwb/{ccdate}/html/page_01.htm'
        self.ccdate = ccdate
        self.id = id
        self.name = '牡丹江日报'
        print('初始化：', self.name, self.url)
        # Stable media id derived from the paper's name.
        self.mid = hashlib.md5((self.name + '_baozi').encode()).hexdigest()
        # makedirs(exist_ok=True) replaces the isdir()+mkdir() pair, which was
        # race-prone and failed when SAVEPATH's parents were missing.
        os.makedirs(SAVEPATH + self.mid, exist_ok=True)

    @staticmethod
    def _cn_date(ccdate):
        """Return 'YYYYMMDD' formatted as 'YYYY年M月D日' (no leading zeros).

        Fixes the old ``.replace("0", "")`` formatting, which stripped *every*
        zero — month '10' became '1' and day '20' became '2', so two-digit
        months/days containing a zero never matched any article title.
        """
        return f'{ccdate[:4]}年{int(ccdate[4:6])}月{int(ccdate[6:8])}日'

    def get_gzhList(self):
        """Query the WeChat official-account monitor for the day's article.

        :returns: the matched article's content URL, or None when the date
                  is absent from the feed (or the API call fails).
        """
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Language": "zh,zh-CN;q=0.9",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Origin": "http://39.99.140.242",
            "Referer": "http://39.99.140.242/wx_gzh/media/?id=gh_55fc44c27b56",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
            "sign": "8fb6a26324bbc997d353fadfe6c155ff"
        }
        url = "http://39.99.140.242/wx_gzh/api/fetch_data/gzh_detail/"
        data = {
            "start": "0",
            "length": "300",
            "id": "gh_55fc44c27b56",
            "order[0][dir]": "desc",
            "order[0][column]": 3
        }
        response = requests.post(url, headers=headers, data=data, verify=False, timeout=30)
        if response.status_code == 200:
            wanted = self._cn_date(self.ccdate)
            for item in response.json()['data']:
                article_title = item['article_title']
                # Titles look like '…数字报 2023年5月20日…' — match both markers.
                if '数字报' in article_title and wanted in article_title:
                    print(wanted, article_title)
                    print(item['article_content_url'])
                    return item['article_content_url']
        return None

    def get_weapp_image_link(self, url):
        """Fetch the WeChat article and extract the mini-program book URL.

        :param url: article content URL returned by :meth:`get_gzhList`.
        :returns: the decoded flip-book base URL (cached on
                  ``self.weapp_image_link``), '' when the article carries no
                  mini-program entry, or None when the HTTP request fails.
        """
        headers = {
            "authority": "mp.weixin.qq.com",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "cache-control": "max-age=0",
            "if-modified-since": "Tue, 20 Jun 2023 11:03:56 +0800",
            "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.51"
        }
        response = requests.get(url, headers=headers, timeout=30)
        if response.status_code == 200:
            html = etree.HTML(response.text)
            try:
                weapp_image_link = html.xpath('//a[@class="weapp_image_link js_weapp_entry"]/@data-miniprogram-path')[0]
            except IndexError:
                # No mini-program entry anchor in this article.
                return ''
            # The mini-program path embeds the real (percent-encoded) book URL
            # in its 'scene' parameter; strip the path prefix and decode it.
            weapp_image_link = 'https://' + urllib.parse.unquote(
                re.sub(r'pages/eachbook/eachbook\?scene=', '', weapp_image_link))
            self.weapp_image_link = weapp_image_link
            return weapp_image_link

    def get_base64(self, weapp_image_link):
        """Load the book's mobile index page and extract the config.js token.

        :param weapp_image_link: flip-book base URL (ends with '/').
        :returns: the query-string token appended to config.js, or None when
                  the page cannot be fetched or no longer embeds the token.
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh,zh-CN;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Referer": "https://book.yunzhan365.com/chxf/qbnl/index.html",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\""
        }
        url = f"{weapp_image_link}mobile/index.html"
        response = requests.get(url, headers=headers, timeout=30)
        if response.status_code == 200:
            # Guard instead of crashing on .group() when the layout changes.
            match = re.search(r'javascript/config\.js\?(.*?)">', response.text, re.M)
            return match.group(1) if match else None

    def get_imgs(self, config_base64):
        """Fetch the flip-book config.js and return its page list.

        :param config_base64: cache-busting token from :meth:`get_base64`.
        :returns: a list of ``{"n": [image_path]}`` dicts; empty list when the
                  config cannot be fetched or parsed.
        """
        if not config_base64:
            return []
        headers = {
            "Host": "book.yunzhan365.com",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 MicroMessenger/7.0.20.1781(0x6700143B) NetType/WIFI MiniProgramEnv/Windows WindowsWechat/WMPF XWEB/6939",
            "Accept": "*/*",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-Mode": "no-cors",
            "Sec-Fetch-Dest": "script",
            "Referer": "https://book.yunzhan365.com/chxf/qbnl/mobile/index.html",
            "Accept-Language": "zh-CN,zh",
        }
        url = f"{self.weapp_image_link}mobile/javascript/config.js?" + config_base64
        response = requests.get(url, headers=headers, timeout=30)
        if response.status_code != 200:
            return []
        match = re.search(r'var htmlConfig = (.*?);', response.text)
        if not match:
            return []
        data = json.loads(match.group(1))
        try:
            return data['fliphtml5_pages']
        except KeyError:
            # Some configs carry no page list; synthesize the first pages from
            # the book's base URL (9 pages assumed — TODO confirm real count).
            base_url = data['meta']['url'].replace('index.html', '')
            return [{"n": [f'{base_url}files/mobile/{i}.jpg']} for i in range(1, 10)]

    def index(self, page):
        """Search one page of the yunzhan365 homepage for this date's paper.

        :param page: 1-based page number of the homepage book listing.
        :returns: ``(title, url)`` on a match, ``(False, False)`` otherwise
                  (previously an HTTP error returned a bare None).
        """
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Referer": "https://www.yunzhan365.com/homepage/chxf/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
            "sec-ch-ua": "\"Chromium\";v=\"110\", \"Not A(Brand\";v=\"24\", \"Google Chrome\";v=\"110\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\""
        }
        url = "https://www.yunzhan365.com/api/user/get-homepage"
        params = {
            "userId": "25639",
            "current": str(page),
            "size": "100",
            "lastTime": str(int(time.time() * 1000))
        }
        response = requests.get(url, headers=headers, params=params, timeout=30)
        if response.status_code == 200:
            wanted = self._cn_date(self.ccdate)
            for bookItem in response.json()['data']['books']:
                title = bookItem['title']
                if wanted in title and '牡丹江' in title:
                    print(bookItem)
                    print(bookItem['url'])
                    return title, bookItem['url']
        return False, False

    def __download(self, file, url, title, tid):
        """Download one page image, save it under SAVEPATH and upload it.

        :param file: page image name as listed in the flip-book config.
        :param url: absolute image URL to fetch.
        :param title: display title used in the upload metadata.
        :param tid: md5-based unique id for this page.
        :returns: True on success, False on HTTP error, None when the request
                  itself fails (a single bad page must not abort the run).
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
        }
        try:
            response = requests.get(url, headers=headers, verify=False, timeout=60)
        except requests.RequestException:
            return
        if response.status_code == 200:
            path = SAVEPATH + self.mid + '/' + self.ccdate + '/'
            os.makedirs(path, exist_ok=True)
            # basename guards against '../' or URL fragments in the page name,
            # which would make open() fail or escape the save directory.
            with open(path + os.path.basename(file), 'wb') as f:
                f.write(response.content)
            data = {
                'entity_id': self.mid,
                'title': title,
                'tid': tid,
                'file_name': file,
                'origin_url': url,
                'ndate': self.ccdate[:4] + '-' + self.ccdate[4:6] + '-' + self.ccdate[6:]
            }
            upload_file(data, response.content)
            return True
        else:
            local_log(self.id, f'报纸图片下载失败，{url}', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'报纸地址：', f'响应状态码：', media_name=self.name)
            return False

    def dispatcher(self):
        """Resolve the day's article URL (None when nothing matched)."""
        return self.get_gzhList()

    def run(self):
        """Full pipeline: find article, resolve flip-book, fetch every page."""
        article_content_url = self.dispatcher()
        if not article_content_url:
            local_log(self.id, '当日没有报纸？公众号监测系统中无数据，退出~', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), f'报纸地址：', f'响应状态码：', media_name=self.name)
            return
        weapp_image_link = self.get_weapp_image_link(article_content_url)
        if weapp_image_link:
            config_base64 = self.get_base64(weapp_image_link)
            fliphtml5_pages = self.get_imgs(config_base64)
            print(fliphtml5_pages)
            for i, item in enumerate(fliphtml5_pages, 1):
                file = item['n'][0]
                # No per-page title in the config; number the pages.
                title = f'未知标题{i}'
                tid = hashlib.md5((self.name + title + self.ccdate).encode()).hexdigest()
                url = self.weapp_image_link + 'files/large/' + file.replace('../', '')
                self.__download(file, url, title, tid)








if __name__ == '__main__':
    # Ad-hoc manual run for a fixed publication date.
    target_date = '20230520'
    crawler = MuDanJiangRBao(target_date)
    crawler.run()



