#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################
#	Sogou WeChat public-account article search crawler
#######################
from pymongo import MongoClient
import sys
import time
import urllib,urllib.request
from bs4 import BeautifulSoup
import os
# Make the sibling ../Config directory importable so configbond resolves.
dirs = os.path.abspath(os.path.dirname(__file__)+"/../Config")
os.sys.path.append(dirs)   # append ../Config to sys.path (path hack instead of a proper package install)
# os.sys.path.append("D:/job/crawler/Config")
from configbond import logpath

## multiprocessing (used only by the demo code at the bottom of the file)
from multiprocessing import Pool
import random

import socket

# MongoDB connection: database "bond", collection "weixin" on a LAN host.
client = MongoClient('192.168.1.100',27017)
db=client.bond
collection=db.weixin
# Local machine's IP address (resolved here but not used in the visible code).
ip = socket.gethostbyname(socket.gethostname())

print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'Start crawling the data. . .')


def getSougouWeixin(keyword,i):
	"""Crawl page ``i`` of Sogou WeChat article search results for ``keyword``.

	For every result on the page, fetch the article itself and store a
	document {title, publisher, url, content} into the module-level
	MongoDB ``collection``.

	Side effects: sleeps 30 seconds per call (anti-ban throttle), performs
	HTTP requests, writes to MongoDB. Raises urllib errors / socket.timeout
	on network failure (callers currently do not catch them).
	"""
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"keyword:%s , pages:%d. . ." % (keyword,i))
	time.sleep(30)  # throttle: Sogou blocks clients that request too quickly
	search_url = 'http://weixin.sogou.com/weixin?type=2&page=%s&query=%s' % (i, urllib.parse.quote(keyword))
	headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
	req = urllib.request.Request(search_url, headers=headers)
	# Context manager guarantees the HTTP response is closed (the original leaked it).
	with urllib.request.urlopen(req, timeout=5) as response:
		soup = BeautifulSoup(response.read().decode('UTF-8'), "html.parser")
	news_box = soup.find("div", class_="news-box")
	if news_box is None:
		# Blocked/captcha page or no results: nothing to parse on this page.
		return
	for txt in news_box.find_all("div", class_="txt-box"):
		link = txt.find("h3").find("a")
		title = link.text
		publisher = txt.find("div", class_="s-p").find("a").text
		article_url = link.get("href")  # separate name: do not shadow the search URL
		# Fetch the article body itself.
		req = urllib.request.Request(article_url, headers=headers)
		with urllib.request.urlopen(req, timeout=5) as response:
			article = BeautifulSoup(response.read().decode('UTF-8'), "html.parser")
		content_div = article.find("div", id="img-content")
		if content_div is None:
			# Article layout changed or the page was blocked; skip instead of crashing.
			continue
		# insert_one replaces Collection.insert, which is removed in modern pymongo.
		collection.insert_one({"title":title,"publisher":publisher,"url":article_url,"content":content_div.text})
# Crawl result pages 1-10 for every bond-related search keyword.
for keyword in ["债券到期","短期债券","长期债券","中期债券","应收利息","除权日","买入债券者","到期收益","债券吸引力","复利","债券评级","信贷风险","高收益债券","买回债券","卖回债券","偿债基金","无担保债券","固定收益","抵押债券","高收益债券","投资级债券","无息债券","不记名债券","企业债","记名债券","帐面记录债券"]:
	for i in range(1,11):
		getSougouWeixin(keyword,i)
# NOTE(review): exiting here makes everything below (long_time_task and the
# __main__ multiprocessing demo) unreachable dead code — confirm that is intended.
# sys.exit() is the correct call in a script; exit() is the interactive-shell
# helper injected by the site module and may be absent when run with -S.
sys.exit()
def long_time_task(name,):	
	"""Demo worker: log start, sleep a random 0-3 s, log elapsed, then process ``name``.

	NOTE(review): this function is unreachable — the module-level exit() above
	runs at import time before this ``def`` executes.
	NOTE(review): ``dealData`` and ``driver`` are not defined anywhere in this
	file; calling this function would raise NameError. Presumably leftovers
	from a Selenium-based version — confirm before reviving this code.
	"""
	print('Run task %s (%s)...' % (name, os.getpid()))
	start = time.time()
	time.sleep(random.random() * 3)
	end = time.time()
	print('Task %s runs %0.2f seconds.' % (name, (end - start)))
	dealData(name)
	driver.close()
	driver.quit()

if __name__=='__main__':
	# Multiprocessing demo: fan 4 tasks out over a pool of 4 worker processes.
	# NOTE(review): never executes — the module-level exit() above terminates
	# the script first. Also, the apply_async result objects are discarded,
	# so any exception raised in a worker is silently lost.
	print('Parent process %s.' % os.getpid())
	p = Pool(4)
	for i in range(4):
	    p.apply_async(long_time_task, args=(i,))
	print('Waiting for all subprocesses done...')
	p.close()
	p.join()
	print('All subprocesses done.')
# end of multiprocessing demo

