#!/usr/bin/env python
#-*- coding: utf-8 -*-
#@Time : 2017/11/22 14:48
#@Author : ChenMei

import requests
import re
from newsSpider import nSpider
import os
import sys
import datetime
import json
import MySQLdb

'''
判断文件夹是否存在，如果不存在，则创建该文件夹
'''
def buildDir(dirPath):
    """Ensure the directory *dirPath* exists, creating it (and any parents) if needed.

    Uses EAFP instead of exists-then-create: the original check/create pair
    had a race window in which another process could create the directory
    and make makedirs raise.

    :param dirPath: path of the directory to guarantee.
    :return: True (always, matching the original contract).
    :raises OSError: if creation fails and the path is still not a directory.
    """
    try:
        os.makedirs(dirPath)
    except OSError:
        # Path already exists (or was created concurrently) -- only a
        # problem if it still is not a directory.
        if not os.path.isdir(dirPath):
            raise
    return True

'''
定义新闻类别与url中对应参数的键值对
主要包括12大类别
'''
def classifyMap():
    """Return the mapping from a news-category display name to the URL
    parameter used by 163.com's list endpoints (13 categories in total)."""
    return {
        "社会": "shehui",
        "要闻": "yaowen",
        "国内": "guonei",
        "军事": "war",
        "独家": "dujia",
        "财经": "money",
        "科技": "tech",
        "体育": "sports",
        "娱乐": "ent",
        "时尚": "lady",
        "汽车": "auto",
        "健康": "jiankang",
        "国际": "guoji",
    }

'''
从日志文件中读取已经爬取过的url的set集合
'''
def readLog(logPath):
    """Read a log file and return the set of already-crawled URLs.

    The original version never closed the file handle (and shadowed the
    Python 2 builtin ``file``); ``with`` guarantees the handle is released
    even if reading fails.

    :param logPath: path to the log file, one URL per line.
    :return: set of URL strings with trailing newlines stripped.
    """
    with open(logPath) as logfile:
        return set(line.strip('\n') for line in logfile)

'''
如果之前已经存有日志文件，则对日志文件进行追加写入
'''
def updateLog(logPath, logContent):
    """Append crawled URLs to an existing log file, one per line.

    The original left the handle open if a write raised; ``with`` closes it
    unconditionally.

    :param logPath: path of the log file to append to.
    :param logContent: iterable of URL strings.
    """
    with open(logPath, 'a') as update:
        for line in logContent:
            update.write(line + '\n')
    # Single-argument parenthesized print behaves identically on py2 and py3.
    print("更新日志完成")

'''
保存日志文件
'''
def saveLog(logPath, logContent):
    """Write crawled URLs to a fresh log file, one per line (truncates).

    Same fix as updateLog: ``with`` ensures the handle is closed even when a
    write raises.

    :param logPath: path of the log file to (over)write.
    :param logContent: iterable of URL strings.
    """
    with open(logPath, 'w') as savelog:
        for line in logContent:
            savelog.write(line + '\n')
    print("存储日志完成")

'''
执行主函数
遍历不同类别的新闻，根据参数构建请求新闻列表的url
通过正则表达式将新闻相关的url以及其产生的时间匹配出来
对新闻url进行过滤，过滤掉除今天以外的其他url，并将url
及时间构建成数组传入newsSpider对象中执行
'''
def main():
    """Crawl today's 163.com news article URLs per category and dispatch them.

    For each of the 13 categories, request up to 5 pages of the JSON-P list
    endpoint, keep only articles dated today that are not already in the
    daily log, hand the batch to nSpider for crawling, then persist the
    newly crawled URLs back to the log.
    """
    # Python-2-only hack so mixed Chinese/utf-8 text does not raise
    # UnicodeDecodeError on implicit conversions.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    today = datetime.datetime.now().strftime('%m/%d/%Y')
    logPath = "log/163/" + datetime.datetime.now().strftime('%Y-%m-%d') + ".log"
    if os.path.exists(logPath):
        urlset = readLog(logPath)
        print("读取日志文件")
    else:
        urlset = set()

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        'Cookie': 'tt_webid=6487802632568473101',
    }
    kvmap = classifyMap()
    urlUndo = set()  # URLs crawled during this run, written to the log at the end
    for clasify in kvmap:
        # Directory names are encoded gb2312 -- presumably for a Windows
        # filesystem; TODO confirm before changing.
        buildDir("163test/" + clasify.encode('gb2312'))
        urls = []
        para = kvmap[clasify]
        for i in range(5):
            print("获取第 " + str(i) + " 页 " + clasify + " 元数据")
            if i == 0:
                url = "http://temp.163.com/special/00804KVA/cm_" + para + ".js?callback=data_callback"
            elif i < 10:
                # Pages 2-10 are zero-padded: _02 .. _10.
                url = "http://temp.163.com/special/00804KVA/cm_" + para + "_0" + str(i + 1) + ".js?callback=data_callback"
            else:
                url = "http://temp.163.com/special/00804KVA/cm_" + para + "_" + str(i + 1) + ".js?callback=data_callback"
            # BUG FIX: headers must be passed by keyword. The second
            # positional parameter of requests.get is `params`, so the
            # original call never sent the User-Agent/Cookie headers.
            req = requests.get(url, headers=headers)
            # Strip the "data_callback(" prefix and ")" suffix of the JSON-P
            # payload and wrap the array so json.loads accepts it.
            text = "{\"news\":" + req.text[14:-1] + "}"
            try:
                jsonDate = json.loads(text)
                for news in jsonDate["news"]:
                    t = news["time"].split(" ")[0]
                    # Skip articles that are not from today or already logged.
                    if t != today or news["docurl"] in urlset:
                        continue
                    newsurl = {
                        "time": t,
                        "url": news["docurl"],
                        "tienum": news["tienum"],
                    }
                    urls.append(newsurl)
                    urlUndo.add(news["docurl"])
            except (ValueError, KeyError):
                # ValueError: page beyond the last one returns non-JSON;
                # KeyError: unexpected item schema. Either way, stop paging.
                # (Narrowed from a bare except that also swallowed
                # KeyboardInterrupt/SystemExit.)
                break
        if len(urls) > 0:
            newspider = nSpider(urls, clasify)
            newspider.run()

    if os.path.exists(logPath):
        updateLog(logPath, urlUndo)
    else:
        saveLog(logPath, urlUndo)


# Entry point: run the scraper only when executed as a script.
if __name__ == '__main__':
    main()