﻿import requests
import json
from bs4 import BeautifulSoup
import pymysql
import uuid


class News:
    """Container for one scraped news article.

    Fields were originally shared class variables; they are now set per
    instance in ``__init__``, which is the safer, conventional pattern
    (class-level defaults only worked here because strings are immutable).
    """

    def __init__(self):
        self.cms_id = ""       # portal-side article id
        self.url = ""          # canonical article URL
        self.text = ""         # article body (plain string or bs4 Tag)
        self.title = ""
        self.summary = ""
        self.source = ""       # publishing media name
        self.img = ""          # cover-image URL ('' when absent)
        self.update_time = ""
        self.inTime = ""       # original publish time

    def defprint(self, content):
        """Print *content* to stdout (kept for backward compatibility)."""
        print(content)


def getRequestText(url):
    """GET *url* and return the response body decoded as UTF-8.

    Returns "" on any network or HTTP error instead of raising, so
    callers can treat an empty string as "fetch failed".
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = 'utf-8'
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer silently swallowed.
        return ""


def getNews(url):
    """Fetch the Tencent news-list API at *url* and return a list of News.

    Only plain articles (``article_type == 0``) whose body element could
    be extracted are included.  Always returns a list — ``[]`` on fetch
    failure, bad JSON, or a non-success response — so the caller can
    iterate the result unconditionally (the original returned ``None`` in
    those cases, which crashed the caller's for-loop).
    """
    newsList = []
    json_str = getRequestText(url)
    if not json_str:
        # Network failure upstream; json.loads("") would raise.
        return newsList
    try:
        newsList_dic = json.loads(json_str)
    except json.JSONDecodeError:
        return newsList
    if newsList_dic.get("msg") != "success":
        return newsList
    for news_dic in newsList_dic["data"]["list"]:
        # Only plain news; skip other types such as topic pages.
        if news_dic["article_type"] != 0:
            continue
        news = News()
        news.cms_id = news_dic["cms_id"]
        news.url = news_dic["url"]
        news.title = news_dic["title"]
        news.source = news_dic["media_name"]
        news.img = news_dic["img"]
        news.update_time = news_dic["update_time"]
        news.inTime = news_dic["publish_time"]
        # Fetch the article page itself to extract the body HTML.
        htmlRequest = requests.get("https://new.qq.com/rain/a/" + news_dic["cms_id"])
        htmlRequest.encoding = 'utf-8'
        soup = BeautifulSoup(htmlRequest.text, 'lxml')
        for image in soup.find_all("img"):
            src = image.get("src", "")
            # Only protocol-relative URLs ("//...") need a scheme added;
            # the original unconditionally prefixed "https:" (double-
            # prefixing absolute URLs) and raised KeyError on <img>
            # tags without a src attribute.
            if src and not src.startswith("http"):
                image["src"] = "https:" + src
        news.text = soup.find(attrs={'class': 'content-article'})
        if news.text is not None:
            newsList.append(news)
    return newsList
    # soup = BeautifulSoup(html, "html.parser")
    # print(soup.get_text())


def main():
    """Scrape Sina news and persist each article into the `news` table.

    Uses a parameterized INSERT — the original built SQL with Python
    %-string formatting, which broke on any value containing a quote and
    was open to SQL injection.
    """
    newsList = spider_news()

    conn = pymysql.connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        password='123456',
        database='cov',
        charset='utf8'
    )
    try:
        with conn.cursor() as cursor:
            sql = (
                "insert into news (id, cms_id, url, title, source, img, text,"
                "update_time, in_time) "
                "values(%s,%s,%s,%s,%s,%s,%s,%s,%s) "
                "ON DUPLICATE KEY UPDATE url=%s;"
            )
            for news in newsList:
                try:
                    # str() on uuid/text mirrors the implicit conversion the
                    # old %-formatting performed (news.text may be a bs4 Tag).
                    cursor.execute(sql, (
                        str(uuid.uuid4()), news.cms_id, news.url, news.title,
                        news.source, news.img, str(news.text),
                        news.update_time, news.inTime, news.url,
                    ))
                    conn.commit()
                    print("添加一条新闻")
                except Exception as e:
                    print(e)
                    # Roll back the failed row and keep going with the rest.
                    conn.rollback()
                    print("错误")
        print("共抓取" + str(len(newsList)) + "条新闻")
    finally:
        # Guarantee the connection is released even if an insert loop
        # raises something unexpected (the original leaked it on error).
        conn.close()


def spider_news():
    """Scrape the Sina '疫情' (epidemic) tag page and return a list of News.

    Returns an empty list if the page could not be fetched or contains no
    ``ul.feeds_list li`` items.
    """
    newsList = []
    url = 'http://tags.news.sina.com.cn/%E7%96%AB%E6%83%85'
    text = getRequestText(url)
    soup = BeautifulSoup(text, 'lxml')
    for item in soup.select('ul.feeds_list li'):
        news = News()
        link = item.h3.a
        news.url = link.get('href')
        news.title = link.get('title')
        # The first meta <span><a> holds the publish time; the page exposes
        # no separate update time, so both fields get the same value.
        meta = item.select('.feeds_li_meta span a')
        news.update_time = meta[0].string
        news.inTime = meta[0].string
        # Cover image is optional; fall back to '' when absent.
        # (Removed a stray debug print and a dead no-op select here.)
        imgs = item.select('.feeds_li_cont .feeds_li_p img')
        news.img = imgs[0].get('src') if imgs else ''
        news.text = item.select('.feeds_li_cont .feeds_li_t p a')[0].string
        news.source = '新浪新闻'
        newsList.append(news)
    return newsList


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
