#!python2.7
# -*- coding: utf-8 -*-
#-----------------------------
# Website:      http://www.teepr.com/
# Method:   	GET
# Encoding: 	utf-8
# Version:      1.0
#
# Note:
#Sin_Wun:新聞
#Sheng_Huo:生活
#Ying_Pian:影片
#Yu_Le:娛樂
#Bao_Siao:爆笑
#Dong_Wu:動物
#Jing_Ji:驚奇
#Yi_Shu:藝術
#Biao_Yan:表演
#Ke_Ji:科技
#Lyu_You:旅遊
#Nyu_Sing:女性
#Ce_Yan:測驗
#Yun_Dong:運動
#Shang_Ye:商業
#Ke_Siao:科學
#Jheng_Jhih:政治
#-----------------------------
import os
import re
import sys
import time
import random
import string
import pymysql
from pymongo import MongoClient
import requests
import warnings
from lxml import etree
from myModule import crawler
import _strptime #
from datetime import datetime, timedelta
from collections import OrderedDict
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf-8')
class teepr_news(crawler):
	def __init__(self):
		self.dbName = 'teepr_news'
		super(teepr_news, self).__init__()
		self.cur = pymysql.connect(self.mysqlIP, self.mysqlID, self.mysqlPWD, self.dbName,
		                        autocommit=True,
		                        charset='utf8',
		                        use_unicode=True).cursor(pymysql.cursors.DictCursor)
		self.sleepBase = 10
		self.historyNum = 0
		self.boardNumDict = {'Sin_Wun':'%E6%96%B0%E8%81%9E',Ying Pian':'%E5%BD%B1%E7%89%87','Yu_Le':'%E5%A8%9B%E6%A8%82','Sheng_Huo':'%E7%94%9F%E6%B4%BB',
		'Bao_Siao':'%E7%88%86%E7%AC%91','Dong_Wu':'%E5%8B%95%E7%89%A9','Jing_Ji':'%E9%A9%9A%E5%A5%87',
		'Yi_Shu':'%E8%97%9D%E8%A1%93','Biao_Yan':'%E8%A1%A8%E6%BC%94','Ke_Ji':'%E7%A7%91%E6%8A%80',
		'Lyu_You':'%E6%97%85%E9%81%8A','Nyu_Sing':'%E5%A5%B3%E6%80%A7%E5%B0%88%E5%8D%80','Ce_Yan':'%E6%B8%AC%E9%A9%97
		'Yun_Dong':'%E9%81%8B%E5%8B%95','Shang_Ye':'%E5%95%86%E6%A5%AD','Ke_Siao':'%E7%A7%91%E5%AD%B8',
		'Jheng_Jhih':'%E6%94%BF%E6%B2%BB',}
		self.userAgentLt = self.getUserAgentLt()

	def regexIndex(self, url, reverse=False):
		if reverse==False:
			return re.sub(r'.*http://www.teepr.com/(.*)', r'\1', url)
		else:
			return 'http://www.teepr.com/%s' %(url)



	def parseContent(self, htmlDocument):
		#print htmlDocument
		url = htmlDocument['url']
		#print url
  		board = htmlDocument['board']
		sourceBoard = board
		#print board
  		fileContent = htmlDocument['html']
		tree = etree.HTML(fileContent)
		title = tree.xpath('//div/header/h1')[0].text

		#print title
  		content = tree.xpath('//div/p[@style="text-align: center;"]/strong')
		articleContent = ''.join([i.xpath('normalize-space()') for i in content if i!= '']).encode('utf-8',"ignore")
		#print articleContent
  		cTimeObject = datetime.strptime('19000101_00:00','%Y%m%d_%H:%M').strftime('%Y%m%d_%H:%M')
		#print "cTimeObject"+cTimeObject
		cDate = cTimeObject.split('_')[0]
		cTime = cTimeObject.split('_')[1]

		author = u'\u7121\u4f5c\u8005'


		quoteFrom = ""
		pushIDArray = []
		pushContentArray = []
		pushTimeArray = []
		messageNum = ""
		pushNum = ""
		sourceType = "news"
		sourceWebsite = "trrpr_news"
		pageDict = OrderedDict([
  			('sourceType', sourceType),
  			('sourceWebsite', sourceWebsite),
  			('sourceBoard', sourceBoard),
  			('url', url),
		 	('author', author),
		 	('title', title),
		 	('cTimeObject', cTimeObject),
		 	('cDate', cDate),
		 	('cTime', cTime),
		 	('content', articleContent),
	 	 	('quoteFrom', quoteFrom),
	 	 	('pushIDArray', pushIDArray),
	 	 	('pushContentArray', pushContentArray),
	 	 	('pushTimeArray', pushTimeArray),
        	('messageNum', messageNum),
        	('pushNum', pushNum),
        	('path', '%s/%s' %(os.getcwd(), board)),
        	('uid', '%s&%s&%s' %(sourceType[0],'teepr',re.sub('[a-z]','',board) if re.compile('.*[A-Z].*').match(board) else board)),
  			])
		return pageDict

	def getBoardURLLt(self, board, endDate=(datetime.now()+timedelta(days=-2)).strftime('%Y-%m-%d')):
		"""
		Get urls over index page and combine with queueURLLt and boardURLLt by getPageURLURL as boardURLLt for crawling.

		Args:
		    board (str): the desired board of index page.
		Returns:
		    list of new url list, queue url list, and board url list.
		"""
		boardURLLt = []
		## destURL - dbURL差集url
		pageNum = 1 ##開始頁數

		self.cur.execute('SELECT url FROM `%s_pageURL` ORDER BY mysql_create_time LIMIT 10000000000;' %(board))
		dbURLLt = map(lambda i: i['url'].encode('utf-8'), self.cur.fetchall())
		endDateObject = datetime.strptime(endDate,'%Y-%m-%d')
		currentDateObject = datetime.now()
		while currentDateObject >= endDateObject:
			url = 'http://www.teepr.com/category/%s/page/%i/' %(self.boardNumDict[board], pageNum)
			response = self.getResponse(board, url)
			tree = etree.HTML(response.content)


			#print "currentDateObject:"+datetime.strftime(currentDateObject,'%Y-%m-%d')

			destURLLt = [i.get('href' )for i in tree.xpath('//div[@class="List-4"]/h3/a')]
			soup = BeautifulSoup(response.text, "html.parser")
			for i in soup.find_all(class_="thetime updated")[-1].strings:  # 通过遍历可以使列表生成器对象值输出后面为列表生成器对象<listiterator object at 0x7f71457f5710>
				t = i

			a = t.strip().replace(',', '').replace(' ', '')[7:11]
			b = t.strip().replace(',', '').replace(' ', '')[5:7]

			month = t.strip().replace(',', '').split(' ')[0]

			Emonth = ['January', 'February', 'March', 'April',
					  'May', 'June', 'July', 'August', 'September',
					  'October', 'November', 'December']

			for imonth in Emonth:
				if imonth == month:
					t_2 = (Emonth.index(imonth) + 1)  ##通过 列表名.index(相应列表元素) 可得到相应元素的序列号

			time = a + '-' + '0' + repr(t_2) + '-' + b

			currentDateObject = datetime.strptime(time, '%Y-%m-%d')

			destURLLt = list(set(destURLLt) - set(dbURLLt))
			boardURLLt = boardURLLt + destURLLt
			print 'destURLLt len: %i (%s: %s)' %(len(destURLLt), self.dbName, board)

			k = len(destURLLt)
			#if k==0:
			    #endDateObject = datetime.strptime('2020-12-30','%Y-%m-%d')
			self.cur.execute('INSERT INTO log (idx, board, url, url_status, create_time) \
			                VALUES ("%s", "%s", "%s", "destURLLt len %i", "%s");' \
			                %(datetime.now().strftime('%Y%m%d_%H:%M:%S')+''.join([random.choice(string.ascii_uppercase) for i in range(0,4)]),
			                board,
			                url,
			                len(destURLLt),
			                datetime.now().strftime('%Y%m%d_%H:%M:%S.%f')))

			pageNum+=1

			##stop mechanism
			if len(boardURLLt)>10000:
				endDateObject = datetime.strptime('2020-12-30','%Y-%m-%d')
			#
			# if len(tree.xpath('//li[@class="next"]')) == 0:
			#     endDateObject = datetime.strptime('2020-12-30','%Y-%m-%d')

			if pageNum==18:
				endDateObject = datetime.strptime('2020-12-30','%Y-%m-%d')

			#time.sleep(random.random()*self.sleepBase)
			new_url_num = len(boardURLLt)

			queueURLLt, historyURLLt = self.getPageURLURL(board)
			boardURLLt = boardURLLt + queueURLLt + historyURLLt

		return boardURLLt, new_url_num, len(boardURLLt)-new_url_num

	def getPageContent(self, board, url):
		"""
        Utilize getResponse(), write html text to MongoDB by writeHTMLToMongoDB(), parse content and write result by getParseContent().

        Args:
            board (str): the board of the url.
            url (str): the desired url.
        """
		response = self.getResponse(board, url)
		tree = etree.HTML(response.content)
		element = tree.xpath('//div[@class="md breadcrumb"]/div[@class="title"]')
		if (response != None) and (response.ok):  ##response!=None to avoid error url, response.ok to avoid something like HTTP 404
			content = response.content
			self.writeHTMLToMongoDB(board, url,
									content.decode("gbk", "ignore").encode("utf-8"))  ## 重写改动处 将网页编码 "GBK" 转码为 "utf-8"

			htmlDocument = [i for i in self.client[self.dbName]['%s_html' % (board)].find({'url': url})][0]
			pageDict = self.parseContent(htmlDocument)

			pageDict['author'] = self.author_ETL(pageDict['author'])
			if datetime.now() < datetime.strptime(pageDict['cTimeObject'], '%Y%m%d_%H:%M'):
				pageDict['cTimeObject'] = datetime.strptime(datetime.now().strftime('%Y%m%d') + '_' + pageDict['cTime'],
															'%Y%m%d_%H:%M').strftime('%Y%m%d_%H:%M')
				pageDict['cDate'] = datetime.now().strftime('%Y%m%d')

			map(lambda key: pageDict.update({key: htmlDocument[key]}), ['idx', 'createTime', 'updateTime'])
			self.getParseContent(board, url, pageDict)


# Crawl one board: collect index-page urls back to the given date, then
# fetch/parse every article.  A single crawler instance is reused - the
# original constructed a fresh teepr_news() (and with it a new MySQL
# connection) for every url in the loop.
board = 'Sin_Wun'
spider = teepr_news()
boardURLLt, new_url_num, update_url_num = spider.getBoardURLLt(board, '2016-08-10')
for pageURL in boardURLLt:
	spider.getPageContent(board, pageURL)