import requests
from bs4 import BeautifulSoup
import re
import time
from pprint import pprint
from urllib.parse import urljoin
import collections

class Item(object):
    """Scraper for the health-article listings on cnys.com.

    Workflow: build the 100 paginated listing URLs, fetch each page,
    extract per-article metadata plus the full article body, and hand
    each page's records to ``get_save`` (currently a pass-through).
    """

    def __init__(self):
        # Browser-like headers. NOTE(review): the Cookie was captured from a
        # live session and will expire — refresh it if requests start failing.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
            'Cookie': 'UM_distinctid=17684d7fe50629-0b107a97feb611-5a472316-144000-17684d7fe51365; CNZZDATA1278013978=884472935-1608542403-https%253A%252F%252Fwww.baidu.com%252F%7C1608547890; Hm_lvt_1cca410710074a46cab918f2116fbe1e=1608546517,1608550543,1608552159; Hm_lpvt_1cca410710074a46cab918f2116fbe1e=1608552173'
        }
        # Listing pages are paginated; {} receives the page number.
        self.url = 'https://www.cnys.com/article/list_2_{}.html'

    def get_url(self):
        """Return the listing-page URLs for pages 1 through 100."""
        return [self.url.format(i) for i in range(1, 101)]

    def get_response(self, url):
        """GET *url* and return the decoded response body as text.

        Raises requests.HTTPError on non-2xx responses so the caller never
        silently parses an error page.
        """
        # FIX: added a timeout (the original could hang forever) and an
        # explicit status check.
        response = requests.get(url=url, headers=self.headers, timeout=10)
        response.raise_for_status()
        return response.content.decode()

    def get_content(self, response):
        """Parse an HTML string into a BeautifulSoup tree (lxml parser)."""
        return BeautifulSoup(response, 'lxml')

    def get_data(self, response):
        """Extract article records from one listing page's HTML.

        Returns a list of OrderedDicts with keys: title, image_src,
        introduction, content_url, main_context. Also pretty-prints the
        batch for progress visibility.
        """
        required_data = []
        base = 'https://www.cnys.com/'
        soup = BeautifulSoup(response, 'lxml')
        contents = soup.select('div.leftLists a')
        for c in contents:
            info_dict = collections.OrderedDict()
            # FIX: tolerate anchors missing attributes instead of raising
            # KeyError/TypeError and aborting the whole page.
            info_dict['title'] = c.get('title', '')
            img = c.select_one('.nationalList .nationalListImg img')
            # Lazy-loaded images carry the real URL in data-original.
            info_dict['image_src'] = img['data-original'] if img else ''
            intro = c.select_one('.nationalList .nationalListText p')
            info_dict['introduction'] = intro.text.strip().replace('导语：', "") if intro else ''
            info_dict['content_url'] = urljoin(base, c['href'])
            # Fetch the article page itself for the full body text.
            main_body = self.get_response(info_dict['content_url'])
            soup_body = self.get_content(main_body)
            main_context = soup_body.select('div.reads')
            for m in main_context:
                info_dict['main_context'] = m.get_text().strip().replace('\n', "")

            # BUGFIX: append the dict itself — the original appended
            # info_dict.items(), a transient view useless to consumers.
            required_data.append(info_dict)

        pprint(required_data)
        # BUGFIX: return the scraped records — the original returned None,
        # so run() piped nothing into get_save().
        return required_data

    def get_save(self, get_data):
        """Persistence hook: currently a pass-through placeholder."""
        return get_data

    def run(self):
        """Crawl every listing page and hand each batch to get_save."""
        url_info = self.get_url()

        for url in url_info:
            response = self.get_response(url)
            data = self.get_data(response)
            self.get_save(data)


def main():
    """Entry point: run the crawler and report elapsed wall-clock time."""
    started = time.time()
    crawler = Item()
    crawler.run()
    print('消耗时间：', time.time() - started)


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()