#!/usr/bin/env python
# coding: utf-8

# In[1]:


import requests
import re
# Desktop-Chrome User-Agent so Baidu serves the regular HTML results page.
headers = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/69.0.3497.100 Safari/537.36'),
}
def baidu(company):
    """Build and print the Baidu News search URL for *company*."""
    search_url = f'https://www.baidu.com/s?tn=news&rtt=1&bsst=1&cl=2&wd={company}'
    print(search_url)
# Demo pass: print the news-search URL for each sample company.
companys = ['华能信托', '阿里巴巴', '百度集团']
for firm in companys:
    baidu(firm)


# In[20]:


# Regex extraction over `res` (the fetched Baidu News HTML from an earlier
# notebook cell — `res` is not defined in this file; it was produced by a
# deleted cell).
p_href = '<h3 class="news-title_1YtI1"><a href="(.*?)"'
href = re.findall(p_href, res, re.S)
# BUG FIX: the original pattern contained a stray '8' ('...1YtI1"8>') that
# could never match any HTML; aligned with the working pattern used inside
# baidu() later in this file.
p_title = '<h3 class="news-title_1YtI1">.*?>(.*?)</a>'
title = re.findall(p_title, res, re.S)
p_date = '<span class="c-color-gray2 c-font-normal">(.*?)</span>'
date = re.findall(p_date, res)
p_source = '<span class="c-color-gray c-font-normal c-gap-right">(.*?)</span>'
source = re.findall(p_source, res)


# In[ ]:


import requests
import re
import time
# BUG FIX: restored the missing space in '537.36 (KHTML' — the original was
# '537.36(KHTML', a malformed User-Agent inconsistent with the first cell.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
def baidu(company):
    """Scrape Baidu News results for *company* and append them to a report.

    Fetches the news-search page, extracts title/link/date/source with
    regexes, prints each result to the console, and appends the listing to
    '数据挖掘报告.txt'.

    Raises whatever `requests.get` raises on network failure/timeout;
    callers wrap this in try/except.
    """
    url = 'https://www.baidu.com/s?tn=news&rtt=1&bsst=1&cl=2&wd=' + company
    res = requests.get(url, headers=headers, timeout=10).text
    p_href = '<h3 class="news-title_1YtI1"><a href="(.*?)"'
    href = re.findall(p_href, res, re.S)
    p_title = '<h3 class="news-title_1YtI1">.*?>(.*?)</a>'
    title = re.findall(p_title, res, re.S)
    p_date = '<span class="c-color-gray2 c-font-normal">(.*?)</span>'
    date = re.findall(p_date, res)
    p_source = '<span class="c-color-gray c-font-normal c-gap-right">(.*?)</span>'
    source = re.findall(p_source, res)
    # Clean titles (strip whitespace and <em>-style highlight tags) and echo
    # each result to the console.
    for i in range(len(title)):
        title[i] = re.sub('<.*?>', '', title[i].strip())
        print(str(i + 1) + '.' + title[i], source[i], date[i])
        print(href[i])
    # BUG FIX: the original opened the file inside the per-result loop
    # (never closing it — a resource leak) and nested a second loop that
    # reused `i` and rewrote the whole listing once per result, duplicating
    # the report len(title) times. Write the report exactly once, via a
    # context manager that guarantees the file is closed.
    with open('数据挖掘报告.txt', 'a', encoding='utf-8') as file1:
        file1.write(company + '数据挖掘completed！' + '\n' + '\n')
        for i in range(len(title)):
            file1.write(str(i + 1) + '.' + title[i] + '(' + date[i] + '-' + source[i] + ')' + '\n')
            file1.write(href[i] + '\n')
        # Separator between companies; in the original this write was fused
        # onto a comment line and never executed — restored as intended.
        file1.write('——————————————————————————————' + '\n' + '\n')
#companys = ['五粮液', '阿里巴巴', '泸州老窖', '诺安成长', '腾讯', '京东']
#for i in companys:
    #baidu(i)
   # print('成功！')
   #print(i + '百度新闻爬取成功')
  #print('数据获取及生成报告成功')
 #     except:
#         print('爬取有问题，接着爬下一家公司')
while True:  # poll forever: one full pass over all companies per hour
    companys = ['贵州茅台', '五粮液', '泸州老窖', '中国中免', '宁德时代', '迈瑞医疗', '美的集团', '中国平安', '隆基股份', '立讯精密', '药明康德', '顺丰控股', '亿纬锂能', '山西汾酒', '海康威视', '恒瑞医药', '爱尔眼科', '长春高新', '洋河股份', '三一重工', '通策医疗', '招商银行', '分众传媒', '京东方A', '万华化学', '通威股份', '宁波银行', '伊利股份', '赣锋锂业', '海尔智家', '汇川技术', '海大集团', '东方财富', '智飞生物', '芒果超媒', '韦尔股份', '万科A', '华友钴业', '金山办公', '紫金矿业', '恒生电子', '广联达', '比亚迪', '兴业银行', '平安银行', '格力电器', '中航光电', '阳光电源', '中国太保', '恩捷股份', '新华保险', '中航西飞', '西藏药业', '天赐材料']
    for i in companys:
        try:
            baidu(i)
            print(i + '百度新闻爬取成功')
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made this infinite loop
        # unkillable from the keyboard. Best-effort behavior is preserved:
        # one failing company does not stop the pass.
        except Exception:
            print(i + '百度新闻爬取失败')
    time.sleep(3600)  # wait one hour before the next pass
   

