from applications.common.curd import get_one_by_id
from applications.common.utils.trans_time import stamp2str, str2stamp
from applications.models import UrlModel, LatestTimeModel
from applications.common.spider.config.scau_math_mesage_config import *
from bs4 import BeautifulSoup
from applications.extensions import db, mail
import time
import requests
import os


class ScauMathMessage(object):
    """Incremental scraper for SCAU Math School announcements (info.scau.edu.cn).

    Typical run, per scrape target:
        reset_math_message_tar(tar)  # select the target to scrape
        get_msg_link()               # collect links newer than LATEST_TIME
        download_msgs()              # save each article page locally
        save_msgs()                  # bulk-insert rows into UrlModel
        save_logs()                  # append the run log to LOG_SAVE_PATH
    """

    def __init__(self):
        # Browser-like headers; the site expects a desktop User-Agent.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.36 Edg/104.0.1293.70",
            "Host": "info.scau.edu.cn"
        }
        self.logs = []           # in-memory run log, flushed by save_logs()
        self.category_id = 1     # UrlModel.category_id for saved rows
        self.LATEST_TIME = 0     # newest article timestamp already persisted
        self.pos = ""            # LatestTimeModel row id of the current target
        self.name = ""           # human-readable target name (stored in "details")
        self.flag_name = ""      # short target id, used in saved file names
        self.articles = []       # articles collected by get_msg_link()

    def reset_math_message_tar(self, tar):
        """Point the scraper at a new target described by dict *tar*.

        Expected keys: "id", "fakeid", "name", "category_id".
        """
        self.pos = tar["id"]
        self.flag_name = tar["fakeid"]
        self.name = tar["name"]
        self.category_id = tar["category_id"]
        self.articles = []  # drop articles left over from a previous target

    def LoadLATEST_TIME(self):
        """Load the stored high-water-mark timestamp for the current target."""
        # NOTE(review): assumes a LatestTimeModel row with id == self.pos
        # exists; get_one_by_id returning None would raise AttributeError.
        t = get_one_by_id(model=LatestTimeModel, id=self.pos)
        self.LATEST_TIME = t.time

    def SaveLATEST_TIME(self, cur_time):
        """Persist *cur_time* as the new high-water mark for this target."""
        t = get_one_by_id(model=LatestTimeModel, id=self.pos)
        t.time = int(cur_time)
        db.session.commit()

    def get_msg_list(self, html_list):
        """Parse one listing page's child nodes into article dicts.

        Returns a list of {"title", "date" (unix stamp), "href", "desc"}.
        """
        ans = []
        for s in html_list:
            # children yields NavigableStrings (whitespace) too; only <div>
            # nodes are article rows.
            if s.name != 'div':
                continue
            title = s.find(name='div', attrs={'class': 'title'}).get_text()
            # text=True restricts the match to the date node carrying text.
            date = s.find(name='div', attrs={'class': 'date'}, text=True).get_text()
            link = s.find(name='div', attrs={'class': 'desc'}).find(name='a')
            ans.append({
                "title": title,
                # The listing only shows a date; normalize to midnight.
                "date": str2stamp(date + " 00:00:00"),
                "href": link.attrs['href'],
                # \xa0 is the &nbsp; padding in the summary text.
                "desc": link.get_text().replace(u'\xa0', ''),
            })
        return ans

    def get_msg_link(self):
        """Walk listing pages newest-first, collecting every article newer
        than the stored LATEST_TIME, then advance the high-water mark."""
        page = 1
        cur_time = int(time.time())  # oldest article timestamp seen so far
        max_tim = 0                  # newest article timestamp seen so far
        self.LoadLATEST_TIME()
        print("start-get-link-" + self.name)
        while cur_time > self.LATEST_TIME:
            # NOTE(review): list id 3772 is hard-coded, so every target
            # scrapes the same column regardless of self.pos — confirm.
            request_url = "https://info.scau.edu.cn/3772/list" + str(page) + ".htm"
            print(request_url)
            req = requests.get(request_url, headers=self.headers)
            req.encoding = req.apparent_encoding
            soup = BeautifulSoup(req.text, 'html.parser')
            html_list = soup.find(name='div', attrs={'id': 'wp_news_w3'}).children
            msg_list = self.get_msg_list(html_list)
            if not msg_list:
                # An empty page can never lower cur_time; without this
                # guard the loop would paginate forever.
                break
            for msg in msg_list:
                tim = msg["date"]
                if tim > max_tim:
                    max_tim = tim
                if tim <= cur_time:
                    cur_time = tim
                if cur_time <= self.LATEST_TIME:
                    break  # reached articles we already have
                res = {
                    "title": msg["title"],
                    "link": msg["href"],
                    "time": tim,
                    "desc": msg["desc"],
                    "content": ""  # filled with the local path by download_msgs()
                }
                self.articles.append(res)
                print(res)
                time.sleep(3)  # throttle to stay under anti-crawler limits
            page += 1
            time.sleep(10)
            self.add_logs("完成一页，反爬措施启动中当前时间戳->" + str(cur_time))
        # Only ever move the high-water mark forward: persisting 0 (nothing
        # parsed) would make the next run re-fetch everything.
        if max_tim > self.LATEST_TIME:
            self.SaveLATEST_TIME(max_tim)
        self.add_logs("当前文章更新时间:" + stamp2str(max_tim))

    def deal_with_msg_local(self, soup, file_name, tar_path):
        """Strip the page chrome around the article body and absolutize
        image URLs so the saved local copy renders correctly.

        ``file_name`` and ``tar_path`` are currently unused; they are kept
        for interface compatibility with download_one_msg_local().
        """
        container = soup.find(name='div', attrs={'class': 'container p-5'})
        content = container.find(name='div', attrs={'class': 'article-content'})
        # Remove the sibling layout blocks around both the outer container
        # and the article content itself.
        for node in (container, content):
            for s in node.find_next_siblings(['div', 'h1', 'h2', 'h3']):
                s.extract()
            for s in node.find_previous_siblings(['div', 'h1', 'h2', 'h3']):
                s.extract()

        footer = soup.find(name='footer')
        if footer is not None:
            footer.extract()

        # Prefix site-relative image URLs with the domain.  (The original
        # implementation collected parallel url lists indexed by a counter
        # that incremented for every <img>, drifting out of sync whenever
        # an <img> had no src attribute; rewriting in place fixes that.)
        domain = "https://info.scau.edu.cn"
        for img in soup.find_all('img'):
            for attr in ('src', 'original-src'):
                url = img.attrs.get(attr)
                # Only rewrite non-empty, relative URLs.
                if url and not url.startswith(('http://', 'https://', '//')):
                    img.attrs[attr] = domain + url
        return soup

    def download_one_msg_local(self, tim, url):
        """Fetch one article page, clean it, save it under SAVE_ROOT_PATH
        and return the saved HTML file path."""
        print("start-get-html")
        req = requests.get(url, headers=self.headers)
        req.encoding = req.apparent_encoding
        soup = BeautifulSoup(req.text, 'html.parser')
        # <category>_<target>_<article time>_<fetch time>.html
        file_name = str(self.category_id) + '_' + self.flag_name + '_' + str(tim) + '_' + str(
            int(time.time())) + ".html"
        # Each article gets its own directory, named after the file
        # without its ".html" suffix.
        tar_path = SAVE_ROOT_PATH + '/' + file_name[:-5] + '/'
        os.makedirs(tar_path, exist_ok=True)
        # Cleaning is best-effort: an unexpected page layout must not abort
        # the whole run, so fall back to saving the raw page.
        try:
            soup = self.deal_with_msg_local(soup, file_name, tar_path)
        except Exception as e:
            print(e)
        save_html_path = tar_path + file_name
        with open(save_html_path, "wb") as f:
            f.write(soup.prettify("utf-8"))
        return save_html_path

    def download_msgs(self):
        """Download every collected article and record its local path in
        the article's "content" field."""
        if not self.articles:
            self.add_logs("没有需要下载的文章")
            return
        print("start-get-articles")
        for article in self.articles:
            url = "https://info.scau.edu.cn" + article["link"]
            article["content"] = self.download_one_msg_local(article["time"], url)
            time.sleep(20)  # heavy throttle between article downloads

    def save_msgs(self):
        """Bulk-insert the collected articles into UrlModel."""
        print("start-save-articles")
        if not self.articles:
            self.add_logs("无需更新")
            return
        self.add_logs("开始保存当前链接")
        db.session.execute(
            UrlModel.__table__.insert(),
            [{
                "title": i["title"],
                "href": i["link"],
                "time": stamp2str(i["time"]),
                "category_id": self.category_id,
                "enable": 1,
                "finish": 1,
                "details": self.name,
                "content": i["content"],
                "remark": i["desc"],
            } for i in self.articles]
        )
        db.session.commit()
        self.add_logs("保存成功")

    def add_logs(self, msg):
        """Append one line to the in-memory run log."""
        self.logs.append(msg)

    def save_logs(self):
        """Append the buffered run log to LOG_SAVE_PATH, terminated by a
        blank separator line."""
        self.add_logs("----结束------" + stamp2str(int(time.time())) + "-------------")
        with open(LOG_SAVE_PATH, "a") as f:
            for log in self.logs:
                f.write(log)
                f.write('\n')
            f.write('\n')
