from bs4 import BeautifulSoup
from datetime import datetime

from django.conf.global_settings import MEDIA_ROOT
from wordcloud import WordCloud
import jieba
import os
import requests
import chardet
import math
import re
from ..models import NewsArticle
from django.db.models import Max

# BUAA news site endpoints: `base_url` is prepended to relative article links,
# `url` is the front page of the "comprehensive news" (zhxw) listing, and
# `start_url` is the prefix for the numbered listing pages ("<n>.htm").
base_url = "https://news.buaa.edu.cn/"
url = "https://news.buaa.edu.cn/zhxw.htm"
start_url = "https://news.buaa.edu.cn/zhxw/"


class News:
    """Scrape articles from the BUAA news site, persist them via the
    ``NewsArticle`` Django model, and generate per-article word clouds."""

    def __init__(self, url, path) -> None:
        self.url = url    # listing page used to discover the total page count
        self.path = path  # local directory holding previously saved article files

    def getNews(self, earliest):
        """Crawl the listing pages and save every article that is newer than
        both the newest article already in the database and *earliest*
        (a "%Y-%m-%d" date string)."""
        pageNums = self.getNewPages(10)
        # Newest publication date already stored; fall back to a far-past
        # sentinel so every scraped article counts as new on the first run.
        last_date = NewsArticle.objects.aggregate(Max('pub_date'))['pub_date__max']
        if last_date is None:
            last_date = datetime.strptime("1900-01-01", "%Y-%m-%d").date()

        earliest_dt = datetime.strptime(earliest, "%Y-%m-%d")
        # NOTE(review): range(1, pageNums) never visits index ``pageNums``;
        # the site's front listing page may account for the missing page —
        # confirm against the live pagination before changing.
        for page in range(1, pageNums):
            response = requests.get(start_url + str(page) + ".htm", timeout=30)
            response.encoding = "utf-8"
            soup = BeautifulSoup(response.text, 'html.parser')
            for h2 in soup.find_all("h2"):
                # Article title.
                title = h2.find("a").text
                # Publication date, rendered on the page as "[YYYY-MM-DD]".
                date_str = h2.find("em").text.replace("[", "").replace("]", "")
                # Absolute link to the article detail page.
                link = base_url + self.extract_after_first_slash(h2.find("a")['href'])

                pub_date = datetime.strptime(date_str, "%Y-%m-%d").date()
                # Skip articles already covered by a previous crawl.
                if pub_date < last_date:
                    continue
                # Skip articles published before the caller's cut-off.
                if datetime.strptime(date_str, "%Y-%m-%d") < earliest_dt:
                    continue

                # Fetch and parse the article body.
                resp = requests.get(link, timeout=30)
                resp.encoding = "utf-8"
                detail = BeautifulSoup(resp.text, 'html.parser')
                content = detail.find("div", class_="v_news_content")
                if content is None:
                    continue
                content_text = ''.join(p.text for p in content.find_all("p"))

                # "/" is illegal in the filename-derived titles used elsewhere.
                title = title.replace("/", "_")
                self.saveNews(title=title, link=link,
                              content=content_text, pub_date=pub_date)

    def saveNews(self, title, link, content, pub_date):
        """Insert the article unless one with the same title already exists."""
        NewsArticle.objects.get_or_create(
            title=title,
            defaults={
                'link': link,
                'content': content,
                'pub_date': pub_date,
            }
        )

    # Number of listing pages, assuming ``tips`` articles per page.
    def getNewPages(self, tips):
        """Return the listing page count derived from the total article
        counter on ``self.url``, or ``None`` when it cannot be parsed."""
        response = requests.get(self.url, timeout=30)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'html.parser')
        # The total article count is the first number inside the pager cell.
        # NOTE(review): the id "fanye50834" is site-generated and may change
        # if the site template is regenerated.
        pager = soup.find('td', {'id': "fanye50834"})
        if pager is None:
            return None
        numbers = re.findall(r"\d+", pager.text)
        if not numbers:
            return None
        return math.ceil(int(numbers[0]) / tips)

    def extract_after_first_slash(self, s):
        """Return everything after the first "/" in *s*, or ``None`` if *s*
        contains no slash."""
        match = re.search(r'/(.*)', s)
        return match.group(1) if match else None

    def isLater(self, last_date):
        """Return ``True`` when the current time is strictly after
        *last_date* (a naive ``datetime``)."""
        return datetime.now() > last_date

    def maxCaptTime(self, path):
        """Return the most recent capture date encoded in the first ten
        characters ("%Y-%m-%d") of the filenames under *path*, or the
        sentinel 1900-01-01 when the directory holds no files."""
        dates = [datetime.strptime(name[:10], "%Y-%m-%d")
                 for name in self.get_filenames(path)]
        if not dates:
            return datetime.strptime("1900-01-01", "%Y-%m-%d")
        return max(dates)

    def get_filenames(self, directory):
        """List the plain files (not subdirectories) directly in *directory*."""
        return [f for f in os.listdir(directory)
                if os.path.isfile(os.path.join(directory, f))]

    def cutwordsCount(self, path):
        """Tokenize every saved article file under *path* with jieba.

        Returns a dict mapping article title (the filename without its
        leading "YYYY-MM-DD" prefix and 4-character extension) to a
        space-joined string of tokens.
        """
        # Loop-invariant setup: the user dictionary and the stop-word list
        # only need to be loaded once, not once per file.
        jieba.load_userdict('./tests/user_dict.txt')
        stopwords = self.readStopword('./tests/stopwords.txt')

        new_set = {}
        for filename in self.get_filenames(path):
            # ``with`` guarantees the file handle is closed (the original
            # leaked one handle per article).
            with open(os.path.join(path, filename), "r", encoding='utf-8') as f:
                txt = f.read()
            # Keep only CJK characters (drops punctuation, digits, latin).
            txt = "".join(re.findall('[\u4e00-\u9fa5]+', txt, re.S))
            # Drop stop words and single-character tokens.
            result = [word for word in jieba.lcut(txt)
                      if word not in stopwords and len(word) > 1]
            # Filename layout is "YYYY-MM-DD<title>.txt".
            new_set[filename[10:len(filename) - 4]] = ' '.join(result)
        return new_set

    def readStopword(self, path):
        """Read one stop word per line from *path* into a set (deduplicated)."""
        with open(path, encoding='utf-8') as f:
            # replace() rather than strip() so any deliberate surrounding
            # whitespace in a stop word is preserved, as in the original.
            return {line.replace("\n", "") for line in f}

    def genWordCloud(self):
        """Generate a word-cloud PNG for every article under ``self.path``
        and record its relative path on the matching ``NewsArticle`` row."""
        # BUG FIX: ``cutwordsCount`` requires the article directory; the
        # original called it with no argument and always raised TypeError.
        for title, text in self.cutwordsCount(self.path).items():
            wordcloud = WordCloud(
                width=800,
                height=400,
                font_path="./tests/MSYH.TTC",
                background_color='white'
            ).generate(text)

            image_path = f'wordclouds/{title}.png'
            # Save the rendered cloud under the Django media root.
            wordcloud.to_file(os.path.join(MEDIA_ROOT, image_path))
            # Point the article row at its generated image.
            NewsArticle.objects.filter(title=title).update(wordcloud_image=image_path)

    def __str__(self) -> str:
        return "获取学校新闻当所有新闻，并保存到.txt文件"