# import requests, json
# from bs4 import BeautifulSoup
#
#
# class XinLang(object):
#
#     def __init__(self, page):
#         # 准备初始化数据
#         self.url = f"http://mil.news.sina.com.cn/roll/index.d.html?cid=57918&page={page}"
#         self.headers = {
#             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44"
#         }
#
#     def send_request(self):
#         """
#         发送请求获取响应的方法
#         :return: 返回响应的内容
#         """
#         response = requests.get(self.url, headers=self.headers)
#         content = response.content.decode()
#         # print(content)
#         return content
#
#     def parse_data(self, content):
#         """
#         解析数据的方法
#         :param data:
#         :return:
#         """
#         soup = BeautifulSoup(content, 'lxml')
#         # 层级选择器
#         news_list = soup.select(".linkNews li a")
#         # print(news_list)
#
#         new_list = []
#         # 获取文本内容get_text()
#         for new in news_list:
#             new_dict = {}
#             new_dict["title"] = new.get_text()
#             # 获取属性 get('属性的名字')
#             new_dict["url"] = new.get("href")
#             new_list.append(new_dict)
#
#         return new_list
#
#     def save_data(self, content, new_list):
#         # 1.获取新浪新闻页”中国军情指定页数获取“的爬虫设计为一个类
#         with open(f'中国军情第{i}页.html', 'w') as f:
#             f.write(content)
#
#         # 2.获取”中国军情“N页的爬虫,写入json文件中, 每一页的数据写入一个json文件
#         # 提取的字段： 标题，新闻的url地址
#         # 结构：[{"title": "标题",  ”url“：”url地址“}, .....]
#         with open(f'中国军情第{i}页json数据.json', 'w') as f:
#             json_data = json.dumps(new_list, ensure_ascii=False)
#             f.write(json_data)
#
#     def run(self):
#         # 发送请求 获取响应
#         content = self.send_request()
#         # 提取数据
#         new_list = self.parse_data(content)
#         self.save_data(content, new_list)
#
#
# if __name__ == '__main__':
#     page = int(input("请输入要获取的页数："))
#     for i in range(1, page+1):
#         xinlang = XinLang(i)
#         xinlang.run()


"""
1. 确定url地址;  当前我在浏览器地址栏中请求的url地址的响应中是否有我想要抓取的数据
    有：直接去请求地址栏的地址
    没有：抓包
2. 发送请求 获取响应
3. 抓取数据  --- 确定数据在html源码中的位置，
4. 保存
"""
import requests
from bs4 import BeautifulSoup
import json


class Sina_news:
    """Crawler for the Sina "Chinese military news" listing pages.

    Fetches pages 1..N of the rolling news index, extracts each headline's
    title and link, and writes one JSON file per page.
    """

    def __init__(self, page):
        """Prepare the request headers and the list of page URLs.

        :param page: number of listing pages to crawl, starting at page 1
        """
        url = "http://mil.news.sina.com.cn/roll/index.d.html?cid=57918&page="
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36"
        }
        # One URL per requested page: ...page=1, ...page=2, ...
        self.url_list = [url + str(pagenum) for pagenum in range(1, page + 1)]

    def send_request(self, url):
        """Send a GET request for one listing page and return the response.

        :param url: full page URL to fetch
        :return: the requests.Response object
        """
        response = requests.get(url, headers=self.headers)
        return response

    def parse_data(self, response):
        """Extract headline title/url pairs from one listing page.

        :param response: requests.Response whose body is the listing HTML
        :return: list of dicts shaped like {"title": ..., "url": ...}
        """
        soup = BeautifulSoup(response.content.decode(), "lxml")

        # Each headline is an <a> element inside the ".linkNews" list.
        anchors = soup.select(".linkNews li a")

        return [
            {"title": anchor.get_text(), "url": anchor.get("href")}
            for anchor in anchors
        ]

    def save_data(self, data, page):
        """Serialize one page's headline list to a JSON file.

        :param data: list of {"title", "url"} dicts for one page
        :param page: page label embedded in the output file name
        """
        news_json = json.dumps(data, ensure_ascii=False)  # list -> json
        # ensure_ascii=False keeps Chinese characters literal in the file,
        # so it must be written as UTF-8 explicitly — relying on the
        # platform default encoding can raise UnicodeEncodeError on Windows.
        with open(f'sina_news_{page}', 'w', encoding='utf-8') as f:
            f.write(news_json)

    def run(self):
        """Fetch, parse, and save every page in self.url_list."""
        # enumerate replaces the original list.index(url) lookup, which
        # re-scanned the list on every iteration (accidental O(n^2)).
        for index, url in enumerate(self.url_list, start=1):
            response = self.send_request(url)     # 1. send request
            news_list = self.parse_data(response) # 2. extract data
            self.save_data(news_list, str(index)) # 3. persist data


if __name__ == '__main__':
    # Ask the user how many listing pages to crawl, then run the crawler.
    page_count = int(input('输入获取的页数：'))
    Sina_news(page_count).run()
