# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import re
import time
import aiomysql
import mysql.connector
import pymysql
import threading

def DownUrl(url):
	"""Fetch *url* and return the response body as text.

	Sends browser-like headers so www.importnew.com serves the regular
	HTML page instead of blocking the scraper.
	"""
	headers={
			"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
			"Accept-Encoding":"gzip, deflate, sdch",
			"Accept-Language":"zh-CN,zh;q=0.8",
			# "Connection":"keep-alive",
			"Host":"www.importnew.com",
			"Referer":"http://www.importnew.com/all-posts",
			"Upgrade-Insecure-Requests":"1",
			"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36"
		}
	# BUG FIX: requests.get()'s second positional parameter is `params`
	# (query string), so the headers were previously sent as URL parameters
	# and never as HTTP headers. They must go through the `headers=` keyword.
	# A timeout keeps the crawler from hanging forever on a dead connection.
	r = requests.get(url, headers=headers, timeout=10)
	return r.text

def GetTitleInfo(html):
	"""Parse one 'all-posts' listing page and persist each article via add().

	For every post under div#archive it extracts the thumbnail URL, title,
	link, numeric article id (file name without extension), category and
	publish date, then inserts one row through add().
	"""
	soup=BeautifulSoup(html,'html.parser')
	div= soup.find('div',attrs={'id':'archive'})
	# BUG FIX: the original calls passed `kwargs=''` as a keyword argument.
	# BeautifulSoup treats unknown keywords as attribute filters, so that
	# filtered on an HTML attribute literally named "kwargs" — removed here,
	# along with the redundant default arguments.
	pagecontent= div.find_all('div', attrs={'class':'post floated-thumb'})
	for item in pagecontent:

		title=""
		href=""
		imgurl=""
		# BUG FIX: initialize articleId so the add() call below cannot raise
		# NameError when the meta block unexpectedly has no links.
		articleId=""

		# post-thumb: the thumbnail is optional; a missing sub-tag makes one
		# of the .find() results None, which raises AttributeError — treated
		# as "no image" (best-effort, matching the original intent).
		try:
			header= item.find('div', attrs={'class':'post-thumb'})
			a= header.find('a')
			img=a.find('img')
			imgurl = img.get("src")
		except AttributeError:
			pass

		# post-meta: the first <a> is the article link, the second the category.
		post_meta= item.find('div', attrs={'class':'post-meta'})
		types=''
		alist=post_meta.find_all('a')
		for index, p in enumerate(alist, start=1):
			if index==1:
				# Strip zero-width spaces that appear in some titles.
				title=p.getText().replace('&#8203;','').replace('\u200b','')
				href=p.get("href")
				# Article id = last path segment without its extension,
				# e.g. .../12345.html -> 12345. Raw string avoids the
				# invalid '\.' escape warning of the original.
				last=re.split('/',href)[-1]
				articleId=re.split(r'\.',last)[0]
			elif index==2:
				types=p.getText()

		# Publish date like "2017/06/18" inside the first <p>; fall back to a
		# sentinel date and log the unparsed text when the pattern misses.
		rtimes='1900-01-01'
		alltext=post_meta.p.getText()
		pattern = re.compile(r'\w.*(\d{4}.\d{2}.\d{2})')
		newcontent=alltext.replace('【','').replace('】','').replace('「','').replace('」','')
		match = pattern.match(newcontent)
		if match:
			rtimes=match.group(1)
		else:
			print(alltext)

		# Persist the row.
		add(title,href,imgurl,types,rtimes,articleId)

def add(title,href,imgurl,types,dates,articleId):
	"""Insert one article row into views.UrlContent.

	Values are bound positionally: id ('0'), articleId, href, title,
	dates, types, imgurl. The statement is parameterized, so pymysql
	escapes every value.
	"""
	print(title)
	conn= pymysql.connect(host='localhost',port=3306,user='root',passwd='',db ='views',charset='utf8')
	# BUG FIX: the original leaked the connection and cursor when the insert
	# raised; try/finally guarantees cleanup. execute() replaces the
	# executemany() call that was used for a single row.
	try:
		cur = conn.cursor()
		try:
			sqli="insert into UrlContent values(%s,%s,%s,%s,%s,%s,%s)"
			cur.execute(sqli, ('0',articleId,href,title,dates,types,imgurl))
			conn.commit()
		finally:
			cur.close()
	finally:
		conn.close()

def loadBasicUrl(pager):
	"""Crawl listing pages 1 .. pager-1 and store every article found.

	Pauses one second between pages to avoid hammering the server.
	"""
	for page in range(1, pager):
		print(page)
		page_url = "http://www.importnew.com/all-posts/page/" + str(page)
		GetTitleInfo(DownUrl(page_url))
		time.sleep(1)

def test():
	"""Ad-hoc smoke check: the date regex matches a real post-meta line."""
	alltext='【译】Reddit如何统计每个帖子的浏览量2017/06/18 |  基础技术 | 0 条评论'.replace('【','').replace('】','')
	print(alltext)
	match = re.compile(r'\w.*(\d{4}.\d{2}.\d{2})').match(alltext)
	if not match:
		print('error')
	else:
		print('ok')
		print(match.group(1))



# test()
if __name__ == '__main__':
	# Guard the crawl behind the main check so merely importing this module
	# (e.g. to reuse DownUrl/add) does not immediately start hitting the site.
	loadBasicUrl(100)