import requests
import json
from bs4 import BeautifulSoup


class Item(object):
    """Scraper for NetEase (163.com) international news.

    Downloads a JSONP feed listing article URLs, then fetches each
    article page and prints its title, post info and body paragraphs.
    """

    def __init__(self):
        # JSONP feed of international-news article records.
        self.url = 'https://temp.163.com/special/00804KVA/cm_guoji.js?callback=data_callback'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
        }

    def get_url(self):
        """Return the list of feed URLs to crawl (currently a single feed)."""
        return [self.url]

    def get_html(self, url):
        """Download *url* and return the body decoded as GBK text.

        The 163.com feed is served as GBK, so decode explicitly rather
        than trusting requests' charset guess; ``errors='replace'`` keeps
        a stray undecodable byte from aborting the whole crawl.
        """
        response = requests.get(url=url, headers=self.headers)
        return response.content.decode('gbk', errors='replace')

    @staticmethod
    def _strip_jsonp(payload):
        """Unwrap a ``data_callback(...)`` JSONP payload and parse the JSON.

        Slices between the first '(' and the last ')' so parentheses that
        appear INSIDE the JSON (e.g. in headlines or URLs) are preserved.
        The previous ``.replace(')', ' ')`` removed every ')' and corrupted
        such payloads.

        :raises ValueError: if no wrapper parentheses are found or the
            enclosed text is not valid JSON.
        """
        start = payload.index('(') + 1
        end = payload.rindex(')')
        return json.loads(payload[start:end])

    def get_data_content(self, response):
        """Print title, post info and paragraphs of one article page.

        :param response: ``requests.Response`` for an article URL.

        Pages that lack the expected markup are skipped with a notice
        instead of being silently swallowed.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        try:
            news_title = soup.select_one('h1.post_title').text
            news_time_post_info = (
                soup.select_one('div.post_info').text.strip()
                .replace(" ", "").replace("\n", "").replace("举报", "")
            )
            news_content = soup.select('div.post_body p')
        except AttributeError:
            # Unexpected page layout (selector returned None); leave a
            # trace rather than failing silently as before.
            print('跳过无法解析的页面：{}'.format(response.url))
            return
        print('新闻标题：{}'.format(news_title))
        print('新闻时间：{}'.format(news_time_post_info))
        print("新闻内容：")
        for paragraph in news_content:
            print(paragraph.get_text().strip().replace('\n', ' '))
            print()

    def get_data(self, response):
        """Parse the JSONP feed text and print every linked article.

        :param response: decoded feed text (the JSONP string), as
            returned by :meth:`get_html`.
        """
        for record in self._strip_jsonp(response):
            # Distinct name: do not shadow the feed-text parameter.
            article = requests.get(url=record['docurl'], headers=self.headers)
            self.get_data_content(article)

    def get_save(self, get_data):
        """Persistence hook placeholder; currently returns its input unchanged."""
        return get_data

    def run(self):
        """Crawl every feed URL: download it, then parse and print its articles."""
        for url in self.get_url():
            feed_text = self.get_html(url)
            self.get_save(self.get_data(feed_text))


def main():
    """Entry point: build the scraper and run the full crawl."""
    Item().run()


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()