# -*- coding: utf-8 -*-
import requests
import json
from bs4 import BeautifulSoup
import pymysql
import uuid


class News:
    """Container for one scraped news article.

    Fields are populated by ``getNews`` from the Tencent news API and the
    article page itself; ``text`` ends up holding the BeautifulSoup tag of
    the article body, every other field is a plain string.
    """

    def __init__(self):
        # Instance attributes (the original declared these as class
        # variables, which would be shared defaults across instances).
        self.cms_id = ""       # article id in Tencent's CMS
        self.url = ""          # canonical article URL
        self.text = ""         # article body (HTML fragment / bs4 tag)
        self.title = ""
        self.summary = ""
        self.source = ""       # publishing media name
        self.img = ""          # cover image URL
        self.update_time = ""  # last-update timestamp from the feed
        self.inTime = ""       # publish time from the feed

    def defprint(self, content):
        """Print *content* — simple debug helper."""
        print(content)


def getRequestText(url):
    """Fetch *url* and return the response body as text.

    Returns "" on any network/HTTP failure so callers can treat the
    result as "no data" instead of having to handle exceptions.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # r.encoding = 'utf-8'  # rely on requests' charset detection
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:`: only network/HTTP errors are
        # expected here; real bugs (NameError etc.) should propagate.
        return ""


def getNews(url):
    """Fetch the news-list API at *url* and return a list of News objects.

    Only ordinary articles (``article_type == 0``) whose body could be
    extracted are included.  Always returns a list — [] when the request
    fails, the payload is malformed, or the API does not report success
    (the original fell off the end and returned None in those cases,
    crashing the caller).
    """
    newsList = []
    json_str = getRequestText(url)
    if not json_str:
        # Network failure upstream — nothing to parse.
        return newsList
    try:
        newsList_dic = json.loads(json_str)
    except ValueError:
        # Malformed payload; treat as "no news" rather than crashing.
        return newsList
    if newsList_dic.get("msg") != "success":
        return newsList

    for news_dic in newsList_dic["data"]["list"]:
        # Only plain news items; skip specials/topics and other types.
        if news_dic["article_type"] != 0:
            continue
        news = News()
        news.cms_id = news_dic["cms_id"]
        news.url = news_dic["url"]
        news.title = news_dic["title"]
        news.source = news_dic["media_name"]
        news.img = news_dic["img"]
        news.update_time = news_dic["update_time"]
        news.inTime = news_dic["publish_time"]
        # Fetch the article page itself to extract the body HTML.
        htmlRequest = requests.get(
            "https://new.qq.com/rain/a/" + news_dic["cms_id"], timeout=30)
        htmlRequest.encoding = 'utf-8'
        soup = BeautifulSoup(htmlRequest.text, 'lxml')
        for image in soup.find_all("img"):
            # Image src values are assumed protocol-relative ("//...");
            # make them absolute. NOTE(review): an already-absolute src
            # would be doubled here — same behavior as the original.
            image["src"] = "https:" + image["src"]
        news.text = soup.find(attrs={'class': 'content-article'})
        if news.text is not None:
            newsList.append(news)
    return newsList
    # soup = BeautifulSoup(html, "html.parser")
    # print(soup.get_text())

    
def main():
    """Scrape Tencent's 24-hour news feed and upsert each item into MySQL."""
    url = "https://i.news.qq.com/trpc.qqnews_web.kv_srv.kv_srv_http_proxy/list?sub_srv_id=24hours&srv_id=pc&offset=0&limit=20&strategy=1&ext={%22pool%22:[%22top%22],%22is_filter%22:7,%22check_type%22:true}"
    # Guard against a None return so the loop/len below cannot crash.
    newsList = getNews(url) or []

    conn = pymysql.connect(
        host='数据库IP地址',     # placeholder: database host
        port=3306,
        user='用户名',           # placeholder: database user
        password='密码',         # placeholder: database password
        database='库名',         # placeholder: database name
        charset='utf8'
        )
    try:
        cursor = conn.cursor()
        # Parameterized upsert: the original interpolated values with
        # '%s' string formatting, which breaks on quotes in titles/HTML
        # and is SQL-injectable. pymysql escapes the params itself.
        sql = (
            "insert into news (id, cms_id, url, title, source, img, text, "
            "update_time, in_time) "
            "values (%s, %s, %s, %s, %s, %s, %s, %s, %s) "
            "ON DUPLICATE KEY UPDATE url=%s, title=%s, source=%s, img=%s, "
            "text=%s, update_time=%s"
        )
        for news in newsList:
            # str(news.text): the body is a bs4 tag; store its HTML, as
            # the original's %-formatting implicitly did.
            body = str(news.text)
            params = (
                str(uuid.uuid4()), news.cms_id, news.url, news.title,
                news.source, news.img, body, news.update_time, news.inTime,
                news.url, news.title, news.source, news.img, body,
                news.update_time,
            )
            try:
                cursor.execute(sql, params)
                conn.commit()
                print("添加一条新闻")
            except Exception as e:
                print(e)
                # Roll back the failed statement and carry on with the rest.
                conn.rollback()
                print("错误")
        print("共抓取" + str(len(newsList)) + "条新闻")
    finally:
        # Always release the connection, even if a statement blew up.
        conn.close()


if __name__ == "__main__":
    main()
