#!/usr/bin/env python
#-*- coding: utf-8 -*-
#@Time : 2017/11/27 15:42
#@Author : ChenMei

import sys
import requests
import re
from bs4 import BeautifulSoup
import urllib2
import threading
import datetime
import os
import time
import MySQLdb

'''
Crawl one news article from people.com.cn (pages are GBK encoded).
Four different page layouts are distinguished by the URL's subdomain.
Param:  url - the article URL
Return: the extracted article text, or "" on any failure.
'''
def spider(url):
    contentStr = ""
    page = None
    try:
        page = urllib2.urlopen(url)
        req = page.read().decode("gbk")
        soup = BeautifulSoup(req, "html.parser")
        # Each subdomain wraps the article body in a different container div.
        if 'health.people.com' in url:
            content = soup.find('div', class_='artDet')
        elif 'pic.people.com' in url:
            content = soup.find('div', class_='content cler clearfix')
        elif 'theory.people.com' in url:
            content = soup.find('div', class_='show_text')
        else:
            content = soup.find('div', class_='box_con')
        if content is not None:
            contentStr = content.text.strip()
    except Exception:
        # Best-effort crawl: network errors, bad encoding or parse
        # failures all yield an empty result instead of aborting.
        print("can't find page")
    finally:
        # The original leaked the urlopen handle; always close it.
        if page is not None:
            page.close()
    return contentStr


'''Persist crawled news records into the MySQL database.'''
def sqlHandle(allnews):
    """Insert each news dict (keys: date, title, content, url) into the
    `news` table. Each insert is committed/rolled back individually so
    one bad record does not abort the whole batch."""
    db = MySQLdb.connect(host='localhost', user="root", passwd="123456", db="opinionSupervision", charset="utf8")
    cursor = db.cursor()
    # Parameterized statement (hoisted out of the loop): the driver
    # escapes the values, so quotes in crawled titles/content can no
    # longer break the SQL or inject statements (the original built the
    # query with %-interpolation).
    sql = """insert into news(date,title,content,url)
            values(%s,%s,%s,%s)"""
    for news in allnews:
        try:
            cursor.execute(sql, (news["date"], news["title"], news["content"], news["url"]))
            db.commit()
            print("写入成功")
        except MySQLdb.Error:
            db.rollback()
            print("写入失败")
    db.close()

'''
Process a batch of (title, url, ...) tuples: crawl each article, write
its text to a file named after the cleaned title, and hand the unique
records to the database layer.
'''
def threadSpider(urls):
    collected = []
    seen = set()
    for item in urls:
        title = clearTitle(item[0])
        link = item[1]
        text = spider(link)
        # Skip articles whose body could not be extracted.
        if not text:
            continue
        saveFile("peopleNews/" + title + ".txt", text)
        # The file is (re)written even for duplicates, but each url is
        # inserted into the database only once per batch.
        if link in seen:
            continue
        seen.add(link)
        collected.append({
            "url": link,
            "title": title,
            "content": text,
            "date": datetime.datetime.now().strftime('%Y-%m-%d'),
        })
    sqlHandle(collected)


'''
Strip HTML entity artifacts out of a news title.
'''
def clearTitle(title):
    """Remove `&quot;` and turn non-breaking-space entities into spaces.

    Fix: the original only replaced the bare ``nbsp;`` fragment, which
    left a stray ``&`` behind for the real ``&nbsp;`` entity. The full
    entity is handled first; the bare form is kept as a fallback.
    """
    return title.replace("&quot;", "").replace("&nbsp;", " ").replace("nbsp;", " ")

'''
Save news content to the given file path.
'''
def saveFile(filePath, content):
    """Write `content` (stripped) to `filePath`, overwriting any
    existing file. Errors are reported, not raised, matching the
    crawler's best-effort behavior.

    Fix: the original opened the file without ever closing it; `with`
    guarantees the handle is released even on a write error.
    """
    try:
        with open(filePath, 'w') as out:
            out.write(content.strip())
        print("write " + filePath + " success")
    except Exception:
        print("write " + filePath + " error")

'''
Ensure the given directory exists, creating it (and any missing
parents) if necessary. Always returns True.
'''
def buildDir(dirPath):
    if not os.path.exists(dirPath):
        os.makedirs(dirPath)
    return True

'''
Load the set of already-crawled urls from a log file.
'''
def readLog(logPath):
    """Return a set with one url per line of `logPath` (trailing
    newlines stripped).

    Fix: the original never closed the file handle; `with` releases it
    deterministically, and iterating the file avoids materializing all
    lines via readlines().
    """
    urlset = set()
    with open(logPath) as logfile:
        for url in logfile:
            urlset.add(url.strip('\n'))
    return urlset

'''
Append newly crawled urls to an existing log file, one per line.
'''
def updateLog(logPath, logContent):
    """Append each entry of `logContent` (an iterable of urls) to
    `logPath`. `with` guarantees the handle is closed even if a write
    fails (the original only closed it on the success path)."""
    with open(logPath, 'a') as update:
        for line in logContent:
            update.write(line + '\n')
    print("更新日志完成")

'''
Write a fresh log file with one url per line (overwrites any
existing file at that path).
'''
def saveLog(logPath, logContent):
    """Write each entry of `logContent` (an iterable of urls) to
    `logPath`. `with` guarantees the handle is closed even if a write
    fails (the original only closed it on the success path)."""
    with open(logPath, 'w') as savelog:
        for line in logContent:
            savelog.write(line + '\n')
    print("存储日志完成")

def main():
    """Entry point: fetch today's article index from the People's Daily
    news feed, crawl every not-yet-seen article on worker threads, and
    record the processed urls in a per-day log file."""
    # Python 2 hack so implicit str/unicode conversions use utf-8.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    logPath = "log/people/" + today + ".log"
    # Urls already crawled today (from a previous run) are skipped.
    if os.path.exists(logPath):
        urlset = readLog(logPath)
        print("读取日志文件")
    else:
        urlset = set()

    buildDir("peopleNews/")
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        'Cookie': 'tt_webid=6487802632568473101',
    }
    # Current timestamp as a cache-busting query parameter.
    timepara = str(int(time.time()))
    url = "http://news.people.com.cn/210801/211150/index.js?_=" + timepara
    req = requests.get(url, headers=headers)
    pattern = re.compile(r'"title":"(.*?)","url":"(.*?)","date":"(.*?)"')
    pats = pattern.findall(req.text)

    urls = []
    urlUndo = set()
    # Keep only today's articles that are not already in the log.
    for u in pats:
        if u[2].split(" ")[0] == today and u[1] not in urlset:
            urls.append(u)
            urlUndo.add(u[1])
    num = 10
    # Floor division: explicit on Py3, same result as Py2's `/`; also
    # fixes the original's hard-coded 10 that ignored `num`.
    ulen = len(urls) // num
    print("总长度为:" + str(len(urls)))

    # Split the urls into `num` slices and crawl each on its own thread.
    threads = []
    for i in range(num):
        if i < num - 1:
            arr = urls[i * ulen : (i + 1) * ulen]
        else:
            # Last slice takes the remainder.
            arr = urls[i * ulen :]
        print("第" + str(i) + "片长度为:" + str(len(arr)))
        # Bug fix: the original wrote target=threadSpider(arr), which
        # ran the crawl synchronously and handed its result (None) to
        # Thread. Pass the callable and its args instead.
        thre = threading.Thread(target=threadSpider, args=(arr,))
        thre.start()
        threads.append(thre)
        print("第" + str(i) + "个线程启动")
    # Wait for all workers so the log is only written after the crawl.
    for thre in threads:
        thre.join()

    if os.path.exists(logPath):
        updateLog(logPath, urlUndo)
    else:
        saveLog(logPath, urlUndo)

# Script entry point
if __name__ == '__main__':
    main()

