from applications.extensions import db, mail
from applications.models import UrlModel, LatestTimeModel
from applications.common.spider.config.wechat_public_config import *
from applications.common.utils.trans_time import stamp2str
from applications.common.utils.upload import upload_to_github
from applications.common.curd import get_one_by_id
from bs4 import BeautifulSoup
import hashlib
import requests
import time
import os
import random
import json as jsons


class WeChatPublic(object):
    """Spider for the WeChat Official Account platform (mp.weixin.qq.com).

    Logs in with the configured ACCOUNT/PASSWORD, has an administrator scan a
    QR code, then incrementally collects articles newer than the stored
    per-account watermark (``LatestTimeModel``) and saves them either to a
    local directory or to GitHub.

    Typical flow::

        get_cookies() -> (admin scans QR code) -> check_login() ->
        for each account:
            reset_wxpublic_tar(tar) -> get_article_links() ->
            download_articles() -> save_article()
        save_logs()
    """

    def __init__(self):
        self.session = requests.session()
        self.headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/101.0.4951.54 Safari/537.36",
            'Referer': 'https://mp.weixin.qq.com/'
        }
        self.logs = []          # in-memory log lines, flushed to disk by save_logs()
        self.category_id = 1
        self.LATEST_TIME = 0    # create_time of the newest article already stored
        # current official account being crawled
        self.pos = 0            # database id of the account row (LatestTimeModel key)
        self.fakeid = ""        # platform-unique identifier of the account
        self.name = ""

        self.cookies = None
        self.token = None
        # article dicts {"title", "link", "time", "content"} for the current account
        self.articles = []

    def reset_wxpublic_tar(self, tar):
        """Switch the spider to the official account described by dict *tar*.

        *tar* must provide the keys ``id``, ``fakeid``, ``name`` and
        ``category_id``. Clears the per-account article buffer.
        """
        self.pos = tar["id"]
        self.fakeid = tar["fakeid"]
        self.name = tar["name"]
        self.category_id = tar["category_id"]
        self.articles = []

    def SaveLATEST_TIME(self, cur_time):
        """Persist *cur_time* as the newest-article watermark for this account."""
        t = get_one_by_id(model=LatestTimeModel, id=self.pos)
        t.time = int(cur_time)
        db.session.commit()

    def LoadLATEST_TIME(self):
        """Load the stored newest-article watermark into ``self.LATEST_TIME``."""
        t = get_one_by_id(model=LatestTimeModel, id=self.pos)
        self.LATEST_TIME = t.time

    def get_cookies(self):
        """Start the password login, capture session cookies, then fetch the QR code."""
        self.add_logs("-----开始-----" + stamp2str(int(time.time())) + "-------------")
        url = "https://mp.weixin.qq.com/cgi-bin/bizlogin?action=startlogin"
        params = {
            'username': ACCOUNT,
            'pwd': hashlib.md5(PASSWORD.encode(encoding='UTF-8')).hexdigest(),
            'imgcode': '',
            'f': 'json',
        }
        response = self.session.post(url, data=params, headers=self.headers)
        # BUG FIX: the decoded body was printed unconditionally but only
        # assigned on HTTP 200, raising NameError for any other status.
        if response.status_code == 200:
            print(response.content.decode('utf-8'))
        self.cookies = self.session.cookies
        self.add_logs("获取cokkies成功")
        time.sleep(5)
        self.get_wechat_login_qrcode()

    def get_wechat_login_qrcode(self):
        """Download the login QR code image to QRIMG_PATH and notify the admin."""
        url = "https://mp.weixin.qq.com/cgi-bin/loginqrcode?action=getqrcode&param=4300"
        response = self.session.get(url, headers=self.headers)
        with open(QRIMG_PATH, 'wb') as f:
            f.write(response.content)
        self.send_QRcode()

    def send_QRcode(self):
        """POST the QR code path to the notification endpoint (e.g. mail service)."""
        requests.post(EMAIL_PATH, json={"qrcode_path": QRIMG_PATH})
        self.add_logs("等待管理员扫码")

    def get_token(self):
        """Finish the login and extract the access token from the redirect URL."""
        url = "https://mp.weixin.qq.com/cgi-bin/bizlogin?action=login"
        data = {
            'f': 'json',
            'ajax': 1,
            'random': random.random()
        }
        response = self.session.post(url, data=data, headers=self.headers)
        redirect_url = jsons.loads(response.text)["redirect_url"]
        # the token is everything after the last '=' in the redirect url
        self.token = redirect_url[redirect_url.rfind('=') + 1:]
        self.add_logs("成功获取token")
        time.sleep(10)

    def check_login(self):
        """Poll the QR-scan status every 10 s (up to ~120 s).

        Returns:
            0 on successful login (token acquired), 1 on scan timeout.
        """
        cnt = 0  # wait up to 120 s for the scan
        out_of_time = 1
        while cnt <= 12:
            url = "https://mp.weixin.qq.com/cgi-bin/loginqrcode?action=ask&token=&lang=zh_CN&f=json&ajax=1"
            response = self.session.get(url, headers=self.headers)
            json = jsons.loads(response.text)
            if json["status"] == 1:
                self.get_token()
                out_of_time = 0
                self.add_logs("登录成功")
                return out_of_time
            time.sleep(10)
            cnt += 1
        self.add_logs("扫码超时，登录失败")
        return out_of_time

    def get_article_links(self):
        """Page through the account's article list (newest first), collecting
        every article newer than ``self.LATEST_TIME`` into ``self.articles``.

        Stops when a page reaches articles at or below the stored watermark,
        then advances the watermark to the newest timestamp seen.
        """
        page = 1
        cur_time = int(time.time())
        max_tim = 0  # newest create_time seen in this run
        self.LoadLATEST_TIME()
        print("start-get-link-" + self.name)
        self.add_logs("上次文章更新时间:" + stamp2str(self.LATEST_TIME))
        while cur_time > self.LATEST_TIME:
            begin = (page - 1) * 5
            requestUrl = "https://mp.weixin.qq.com/cgi-bin/appmsg?action=list_ex"
            params = {
                'begin': str(begin),
                'count': 5,
                'fakeid': self.fakeid,
                'type': 9,
                'token': self.token,
                'lang': "zh_CN",
                'f': 'json',
                'ajax': 1,
                'query': ''
            }
            search_response = requests.get(requestUrl, cookies=self.cookies, params=params, headers=self.headers)
            re_text = search_response.json()
            app_msg_list = re_text.get("app_msg_list")
            # BUG FIX: a missing/empty list (expired token, anti-crawl block,
            # or end of history) used to raise TypeError on iteration.
            if not app_msg_list:
                self.add_logs("文章列表为空，停止翻页")
                break
            for i in app_msg_list:
                tim = i["create_time"]
                if tim > max_tim:
                    max_tim = tim
                if tim <= cur_time:
                    cur_time = tim
                if cur_time <= self.LATEST_TIME:
                    break
                res = {
                    "title": i["title"],
                    "link": i["link"],
                    "time": tim,
                    "content": ""
                }
                self.articles.append(res)
                print(res)
                self.add_logs("获取一个文章链接" + jsons.dumps(res))
                time.sleep(5)
            page += 1
            time.sleep(10)
            self.add_logs("完成一页，反爬措施启动中当前时间戳->" + str(cur_time))
        # BUG FIX: only advance the watermark when something was actually seen;
        # saving 0 would reset LATEST_TIME and force a full re-crawl next run.
        if max_tim > 0:
            self.SaveLATEST_TIME(max_tim)
            self.add_logs("当前文章更新时间:" + stamp2str(max_tim))

    def save_article(self):
        """Bulk-insert the collected articles into UrlModel and commit."""
        print("start-save-articles")
        if len(self.articles) == 0:
            self.add_logs("无需更新")
            return
        self.add_logs("开始保存当前链接")
        db.session.execute(
            UrlModel.__table__.insert(),
            [{"title": i["title"],
              "href": i["link"],
              "time": stamp2str(i["time"]),
              "category_id": self.category_id,
              "enable": 1,
              "finish": 1,
              "details": self.name,
              "content": i["content"]
              }
             for i in
             self.articles]
        )
        db.session.commit()
        self.add_logs("保存成功")

    def download_articles(self):
        """Download every collected article to local storage, recording the
        saved HTML path in each article's ``content`` field."""
        n = len(self.articles)
        if n == 0:
            self.add_logs("没有需要下载的文章")
            return
        print("start-get-article")
        for i in range(n):
            url = self.articles[i]["link"]
            title = self.articles[i]["title"]
            tim = self.articles[i]["time"]
            save_path = self.download_one_article_local(title, tim, url)
            self.articles[i]["content"] = save_path
            time.sleep(25)  # throttle between articles to avoid anti-crawl bans

    def _collect_img_urls(self, imgs):
        """Return the image URLs of *imgs* tags, skipping tags with an empty
        or missing ``src`` and no ``data-src`` (order matches the tags that
        will later consume a rewritten path)."""
        urls = []
        for img in imgs:
            if 'data-src' in str(img):
                urls.append(img['data-src'])
            elif 'src=""' in str(img):
                pass
            elif "src" not in str(img):
                pass
            else:
                urls.append(img['src'])
        return urls

    def _fetch_img(self, each_url):
        """Download one article image, normalizing scheme-relative and
        site-relative URLs. Returns the response or None for unrecognized
        URL forms (the original left ``r_pic`` unbound/stale in that case)."""
        domain = 'https://mp.weixin.qq.com/s'
        if each_url.startswith('//'):
            return requests.get('https:' + each_url)
        if each_url.startswith('/') and each_url.endswith('gif'):
            return requests.get(domain + each_url)
        if each_url.endswith(('png', 'jpg', 'gif', 'jpeg')):
            return requests.get(each_url)
        return None

    def _rewrite_img_tags(self, imgs, new_paths):
        """Point each img tag at its own rewritten path (index-matched with
        the URL list produced by ``_collect_img_urls``); ``None`` slots leave
        the tag untouched."""
        slot = 0
        for img in imgs:
            if slot >= len(new_paths):
                break
            if 'data-src' in str(img):
                if new_paths[slot] is not None:
                    img.attrs['data-src'] = new_paths[slot]
                slot += 1
            elif 'src=""' in str(img) or "src" not in str(img):
                pass  # tags without a usable source never consumed a URL slot
            else:
                if new_paths[slot] is not None:
                    img.attrs['src'] = new_paths[slot]
                slot += 1

    def deal_with_article(self, soup, file_name, tar_path):
        """Strip boilerplate around the article body, upload every image to
        GitHub, and rewrite the img tags to the uploaded paths.

        Returns the modified *soup*.
        """
        # strip everything before/after the article body
        # BUG FIX: the original iterated find_previous_sibling()/find_next_sibling()
        # (singular), which walks the CHILDREN of one sibling tag; the plural
        # sibling iterators were clearly intended (cf. deal_with_article_local).
        js_content = soup.find(name="div", attrs={'id': 'js_content'})
        for s in js_content.find_previous_siblings():
            s.extract()
        for s in js_content.find_next_siblings():
            s.extract()

        imgs = soup.find_all('img')
        urls = self._collect_img_urls(imgs)

        # Upload each image; images are named <html-stem>_<index>.jpeg.
        # BUG FIX: the original never incremented the index, cumulatively
        # mutated file_name ("..._0_0_0.jpeg"), and rewrote EVERY img tag with
        # the same path on each pass, leaving all images pointing at the last
        # uploaded picture.
        remote_paths = []  # one slot per harvested url; None => skipped
        for idx, each_url in enumerate(urls):
            r_pic = self._fetch_img(each_url)
            if r_pic is None:
                remote_paths.append(None)
                continue
            time.sleep(5)
            img_name = file_name[:-5] + "_" + str(idx) + ".jpeg"
            save_img_path = upload_to_github(r_pic.content, img_name, tar_path,
                                             "上传图片" + stamp2str(int(time.time())))
            time.sleep(12)  # throttle GitHub API usage
            remote_paths.append(save_img_path)

        self._rewrite_img_tags(imgs, remote_paths)
        return soup

    def deal_with_article_local(self, soup, file_name, tar_path):
        """Strip boilerplate around the article body, save every image under
        *tar_path*, and rewrite the img tags to the locally served paths.

        Returns the modified *soup*.
        """
        # strip headers/footers around the article body
        js_content = soup.find(name="div", attrs={'id': 'js_content'})
        for s in js_content.find_previous_siblings(['h1', 'h2', 'div']):
            s.extract()
        # BUG FIX: the next-siblings branch iterated find_previous_siblings
        # (copy-paste), so trailing boilerplate was never removed.
        for s in js_content.find_next_siblings(['h1', 'h2', 'div']):
            s.extract()

        qr_block = soup.find(name="div", attrs={'class': 'qr_code_pc_inner'})
        if qr_block:
            qr_block.extract()

        imgs = soup.find_all('img')
        urls = self._collect_img_urls(imgs)

        # Save each image locally; images are named <html-stem>_<index>.jpeg.
        http_path = []  # served URL per harvested url; None => skipped
        for idx, each_url in enumerate(urls):
            r_pic = self._fetch_img(each_url)
            if r_pic is None:
                # BUG FIX: unrecognized URLs used to leave r_pic unbound/stale
                http_path.append(None)
                continue
            time.sleep(5)

            img_name = file_name[:-5] + "_" + str(idx) + ".jpeg"
            save_img_path = tar_path + img_name
            with open(save_img_path, "wb") as f:
                f.write(r_pic.content)
            http_path.append(LOCAL_IMG_SAVE_ROOT + '/' + save_img_path)

        # BUG FIX: the original advanced the path index for EVERY img tag,
        # including ones that never contributed a URL, desyncing the mapping
        # (and risking IndexError); indices now track only consuming tags.
        self._rewrite_img_tags(imgs, http_path)
        return soup

    def download_one_article_local(self, title, tim, url):
        """Fetch one article page, clean it, and save it (plus images) locally.

        Returns the path of the saved HTML file.
        """
        print("start-get-html")
        req = requests.get(url, cookies=self.cookies, headers=self.headers)
        req.encoding = req.apparent_encoding
        soup = BeautifulSoup(req.text, 'html.parser')
        file_name = str(self.category_id) + "_" + self.fakeid + "_" + str(tim) + "_" + str(
            int(time.time())) + ".html"
        # target directory for this article's html + images
        tar_path = SAVE_ROOT_PATH + '/' + file_name[:-5] + '/'
        os.makedirs(tar_path, exist_ok=True)
        # best-effort cleanup: a parse failure still saves the raw page
        try:
            soup = self.deal_with_article_local(soup, file_name, tar_path)
        except Exception as e:
            print(e)
        html = soup.prettify("utf-8")
        save_html_path = tar_path + file_name
        with open(save_html_path, "wb") as f:
            f.write(html)
        return save_html_path

    def download_one_article(self, title, tim, url):
        """Fetch one article page, clean it, and upload it (plus images) to GitHub.

        Returns the uploaded HTML path reported by upload_to_github.
        """
        print("start-get-html")
        req = requests.get(url, cookies=self.cookies, headers=self.headers)
        req.encoding = req.apparent_encoding
        soup = BeautifulSoup(req.text, 'html.parser')
        file_name = str(self.category_id) + "_" + self.fakeid + "_" + str(tim) + "_" + str(
            int(time.time())) + ".html"
        # target GitHub directory for this article
        tar_path = GitHUbTarDir + '/' + file_name[:-5]
        # best-effort cleanup: a parse failure still uploads the raw page
        try:
            soup = self.deal_with_article(soup, file_name, tar_path)
        except Exception as e:
            print(e)
        html = soup.prettify("utf-8")
        # upload
        save_html_path = upload_to_github(html, file_name, tar_path, "上传html" + stamp2str(int(time.time())))
        time.sleep(5)
        self.add_logs("文章:" + title + "------------>is ok")
        return save_html_path

    def add_logs(self, msg):
        """Append one line to the in-memory log buffer."""
        self.logs.append(msg)

    def save_logs(self):
        """Append all buffered log lines to LOG_SAVE_PATH and close the run."""
        self.add_logs("----结束------" + stamp2str(int(time.time())) + "-------------")
        # utf-8 is forced so Chinese log lines don't depend on the locale encoding
        with open(LOG_SAVE_PATH, "a", encoding="utf-8") as f:
            for log in self.logs:
                f.write(log)
                f.write('\n')
            f.write('\n')
