# -*- coding:utf-8 -*-
# @Time      :2022/10/25 17:36
# @Author    :To me
# @Email     :2086351502@qq.com
# @File      :get_gxun_news.py
# @Software  :PyCharm

from requests.adapters import HTTPAdapter
from requests_html import HTMLSession
import random
import re
from io import BytesIO
import requests
from bs4 import BeautifulSoup
import time
import docx
from docx.oxml.ns import qn
from docx.shared import Pt, Inches
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT

from gxun_news.settings import NEWS_DOCX_ROOT


class Get_gxun_news:
    """Scrape the GXMZU (Guangxi Minzu University) news index page and export
    each article — title, release date, body paragraphs and inline images —
    as a .docx file under ``NEWS_DOCX_ROOT``.

    Typical use: instantiate (this fetches and parses the index page once),
    then call one of ``get_all_news`` / ``get_today_news`` /
    ``get_custom_news``.
    """

    def __init__(self):
        """Fetch the news index page once and keep the parsed <li> nodes."""
        session = HTMLSession()
        # Retry transient network failures up to 3 times on both schemes.
        session.mount("https://", HTTPAdapter(max_retries=3))
        session.mount("http://", HTTPAdapter(max_retries=3))
        url = 'https://www.gxmzu.edu.cn/mdxww1/mdyw.htm'
        headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.26"
        }
        response = session.get(url, headers=headers, timeout=(5, 6))
        # apparent_encoding sniffs the real charset of the page body.
        response.encoding = response.apparent_encoding
        bs = BeautifulSoup(response.text, 'html.parser')
        self.all_list = []                          # accumulated article dicts
        self.session = session
        self.headers = headers
        self.list_node = bs.select('tr td ul li')   # one <li> per news entry

    @staticmethod
    def _normalize_release_time(text):
        """Convert a date like '2022年10月25日' to '2022-10-25'."""
        return text.replace('年', '-').replace('月', '-').replace('日', '')

    def save_img(self, img_url):
        """Download *img_url* and return its bytes wrapped in a BytesIO
        stream, suitable for ``Document.add_picture``.
        """
        # FIX: the original hex-encoded the payload, called str.replace() and
        # discarded the result (strings are immutable, so it was a no-op),
        # then decoded the hex back to the identical bytes. It also used a
        # bare requests.get() with no timeout or headers; reuse the retrying
        # session instead so image downloads get the same resilience.
        content = self.session.get(img_url, headers=self.headers, timeout=(5, 6)).content
        return BytesIO(content)

    def save_docx(self):
        """Render every collected article in ``self.all_list`` to a .docx file."""
        for data_dict in self.all_list:
            doc = docx.Document()
            area = qn('w:eastAsia')
            # Default body font: Times New Roman for latin, SimSun for CJK.
            doc.styles['Normal'].font.name = 'Times New Roman'
            doc.styles['Normal']._element.rPr.rFonts.set(area, u'宋体')
            # Pull the fields collected by get_news_data().
            title_content = data_dict.get('news_title_content')
            news_title_url = data_dict.get('news_title_url')
            release_time = data_dict.get('release_time')
            # FIX: 'img' was absent when the article fetch failed, and the
            # else-branch below would crash with AttributeError on None.
            img = data_dict.get('img') or {}
            news_content = data_dict.get('news_content')
            # Document heading: article title rendered in SimHei.
            title_1 = doc.add_heading(level=0).add_run(title_content)
            title_1.font.name = u'黑体'
            title_1._element.rPr.rFonts.set(area, u'黑体')
            # Centered release date line.
            title_2 = doc.add_paragraph(release_time)
            title_2.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            # Centered reprint-attribution line.
            deal = f'至{news_title_url}转载'
            title_3 = doc.add_paragraph(deal)
            title_3.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            i = 0  # sequential index into the generated img_name{N} keys
            if not news_content:
                # Body could not be scraped; emit a stub pointing at the URL.
                news_content = f'请转载至{news_title_url}'
                paragraph = doc.add_paragraph(news_content)
                paragraph.paragraph_format.first_line_indent = Pt(20)
            else:
                for index in range(len(news_content)):
                    if news_content[index] == 'img':
                        # 'img' is the placeholder get_news_data() inserts for
                        # paragraphs that contained only a picture.
                        try:
                            has_img_name = news_content[index + 1]
                            if img.get(has_img_name):
                                # The next paragraph is this image's caption;
                                # the picture is emitted together with it below.
                                continue
                        except IndexError:  # FIX: was a bare except
                            pass
                        # Uncaptioned image: look it up by positional key.
                        img_name = f'img_name{i}'
                        img_url = img.get(img_name)
                        if img_url:
                            img_url = self.save_img(img_url)
                            doc.add_picture(img_url, Inches(5))
                            doc.paragraphs[-1].alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
                            i += 1
                        continue
                    img_url = img.get(news_content[index])
                    if img_url:
                        # Captioned image: picture first, centered caption below.
                        img_url = self.save_img(img_url)
                        doc.add_picture(img_url, Inches(5))
                        doc.paragraphs[-1].alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
                        paragraph = doc.add_paragraph(news_content[index])
                        paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
                        continue
                    # Plain body paragraph with a first-line indent.
                    paragraph = doc.add_paragraph(news_content[index])
                    paragraph.paragraph_format.first_line_indent = Pt(20)
            # FIX: '//' was a sloppy double separator.
            # NOTE(review): title_content may contain characters that are
            # invalid in file names — consider sanitizing before saving.
            doc.save(f"{NEWS_DOCX_ROOT}/{title_content}.docx")
            print('--添加中--')
        print('--添加结束--')
        return

    def get_news_data(self, node_a, new_dict):
        """Fetch one article page, extract its paragraphs and image mapping,
        and append the completed dict to ``self.all_list``.

        node_a:   the <a> node from the index list (title/href attributes).
        new_dict: dict pre-filled with 'release_time'; completed in place.
        """
        news_content = []  # paragraph texts, with 'img' placeholders
        img = []           # image URLs waiting to be keyed
        img_dict = {}      # caption-or-positional-name -> image URL
        news_title_content = node_a.attrs["title"]
        news_title_url = node_a.attrs["href"]
        # The index page uses relative links; rebase them onto the site root.
        if '../../' in news_title_url:
            news_title_url = news_title_url.replace('../../', 'https://www.gxmzu.edu.cn/')
        if '..' in news_title_url:
            news_title_url = node_a.attrs["href"].replace('..', 'https://www.gxmzu.edu.cn')
        try:
            content = self.session.get(news_title_url, headers=self.headers, timeout=(5, 6))
            content.encoding = content.apparent_encoding
            bs = BeautifulSoup(content.text, 'html.parser')
            content_list = bs.select("div#vsb_content_2 p")
            # Walk the article paragraphs, collecting text and images.
            for node in content_list:
                img_node = node.select_one('img.img_vsb_content')
                if img_node:
                    # Inline image: remember its absolute URL.
                    img.append('https://www.gxmzu.edu.cn' + img_node.attrs["src"])
                if len(img) >= 2:
                    # Several uncaptioned images queued: key them by position.
                    for i in range(len(img)):
                        img_dict['img_name' + str(i)] = str(img[i])
                else:
                    # A SimKai-styled span appears to carry the image caption
                    # — assumption inherited from the original; verify on the
                    # live pages.
                    result = re.search('<span style="(.*?), SimKai(.*?)">(?P<ext>.*?)</span>', str(node), re.DOTALL)
                    if result:
                        img_name = result.groupdict().get("ext")
                        if len(img) == 1:
                            try:
                                img_dict[img_name] = img[0]
                                img.clear()
                            except Exception:  # FIX: was a bare except
                                img_name = node.select_one('p span').text
                                new_dict[img_name] = img
                                img.clear()
                try:
                    main_content = node.text
                    if not main_content:
                        # Empty paragraph == image-only paragraph; leave a
                        # marker so save_docx() knows to place a picture here.
                        main_content = 'img'
                    news_content.append(main_content)
                except Exception:  # FIX: was a bare except; best-effort skip
                    pass
            if len(img) == 1:
                # One trailing uncaptioned image: give it a positional key.
                img_dict['img_name0'] = str(img[0])
                img.clear()
            # Complete the dict and record the article.
            new_dict['img'] = img_dict
            new_dict['news_title_content'] = news_title_content
            new_dict['news_title_url'] = news_title_url
            new_dict['news_content'] = news_content
            self.all_list.append(new_dict)
            # Light, jittered throttle so we don't hammer the server.
            time.sleep(random.random())
            time.sleep(1)
            print('----进行中------')
        except Exception:  # FIX: was a bare except
            # Fetch/parse failed: record what we have so save_docx() can
            # still emit a stub pointing at the source URL.
            new_dict['img'] = img_dict  # FIX: was missing -> crash in save_docx
            new_dict['news_title_content'] = news_title_content
            new_dict['news_title_url'] = news_title_url
            new_dict['news_content'] = news_content
            self.all_list.append(new_dict)
            time.sleep(random.random())
            print('----进行中------')

    def get_all_news(self):
        """Collect every article on the first index page, write the .docx
        files, and return the list of article dicts."""
        for node_li in self.list_node:
            node_a = node_li.select_one('div a.c54691')
            node_span = node_li.select_one('div span.c54691_date')
            new_dict = {
                'news_title_content': '',
                'news_title_url': '',
                'release_time': '',
            }
            if node_span:
                new_dict['release_time'] = self._normalize_release_time(node_span.text)
            if node_a:
                self.get_news_data(node_a, new_dict)
        self.save_docx()
        print('执行完毕')
        return self.all_list

    def get_today_news(self):
        """Collect only the articles released today, write the .docx files,
        and return them."""
        # FIX: this was a verbatim copy of get_custom_news with the date
        # hard-coded to today; delegate instead of duplicating the loop.
        return self.get_custom_news(time.strftime("%Y年%m月%d日"))

    def get_custom_news(self, custom_time):
        """Collect the first-page articles whose release date equals
        *custom_time* (format '2022年10月25日'), write the .docx files, and
        return them."""
        for node_li in self.list_node:
            node_a = node_li.select_one('div a.c54691')
            node_span = node_li.select_one('div span.c54691_date')
            new_dict = {
                'news_title_content': '',
                'news_title_url': '',
                'release_time': '',
            }
            if node_span and node_span.text == custom_time:
                new_dict['release_time'] = self._normalize_release_time(node_span.text)
                if node_a:
                    self.get_news_data(node_a, new_dict)
        self.save_docx()
        print('执行完毕')
        return self.all_list


