# coding=utf-8
# Crawler: fetches pages for URL tasks pulled from a queue

import time
import urllib.request
from bs4 import BeautifulSoup
import json
import re
import math
import sys

sys.path.append("..")
from script.deqQueue import DeqQueue


class CrawlerTask:
	"""Wraps a raw queue message and exposes the crawl target it describes."""

	def __init__(self, json_data):
		# NOTE(review): the payload is currently unused; kept for interface
		# compatibility with existing callers.
		pass

	def get_url(self, data_str):
		"""Parse *data_str* as JSON and return its 'url' field.

		Returns None when the payload has no 'url' key.  (The previous
		implementation raised UnboundLocalError in that case, because
		``url`` was only assigned inside the ``if`` branch.)
		"""
		data = json.loads(data_str)
		# dict.get avoids the unbound-local crash when 'url' is missing.
		return data.get('url')

	def get_crawl_type(self):
		"""Return the crawl type identifier (currently always 0)."""
		return 0


class CrawlerWorker:
	"""Pulls URL tasks from a queue, fetches each page, stores the result."""

	def __init__(self):
		# NOTE(review): self.conn (used by set_into_deq) is never initialised
		# here -- presumably injected elsewhere; confirm before relying on it.
		pass

	def isRunning(self):
		"""Return True while the worker should keep processing tasks."""
		return True

	def isValid(self, task):
		"""Return True only for http/https task URLs.

		Bug fix: the original computed ``task.startswith("http")`` but
		discarded the result and unconditionally returned True.
		"""
		return task.startswith("http")

	def get_deq_name(self, task):
		"""Map a task URL to the name of the queue it belongs to.

		Returns None for URLs matching none of the known site prefixes.
		"""
		if task.startswith('https://twitter'):
			return "link"
		if task.startswith('https://www.facebook'):
			return "link_facebook"
		if task.startswith('https://weibo'):
			return "link_weibo"
		# Explicit fallback (the original fell off the end implicitly).
		return None

	def crawler(self, task):
		"""Fetch *task* (a URL) and return the raw response body (bytes)."""
		html = urllib.request.urlopen(task).read()
		return html

	def set_into_deq(self, content):
		"""Push *content* onto the result queue; return True on success.

		NOTE(review): both ``self.conn`` and the global ``queue_name`` are
		undefined in this file -- they must be provided elsewhere, otherwise
		this always fails and returns False.  TODO confirm.
		"""
		try:
			self.conn.lpush(queue_name, content)
			return True
		except Exception:
			# Best-effort: report failure instead of crashing the worker,
			# but no longer swallow SystemExit/KeyboardInterrupt (bare
			# except in the original).
			return False

	def work(self):
		"""Main loop: pop a task URL, fetch the page, store the content.

		Bug fixes vs. the original:
		- ``task`` was read before assignment on the first iteration
		  (NameError);
		- CrawlerTask/CrawlerWorker methods were called on the classes
		  without instances (TypeError: missing ``self``).
		"""
		queue = DeqQueue()
		parser = CrawlerTask(None)
		# NOTE(review): assumed initial queue name; the original derived it
		# from the previous task's URL -- confirm the intended start queue.
		queue_name = "link"
		while self.isRunning():
			data_str = queue.get(queue_name)

			if not data_str:
				print("data is empty")
				return None

			task = parser.get_url(data_str)  # the task is just a URL
			if not task:
				# Malformed payload without a 'url' key: skip it.
				continue

			# The next iteration reads from the queue this URL maps to,
			# mirroring the original's use of the previous task.
			queue_name = self.get_deq_name(task) or queue_name

			if self.isValid(task):
				content = self.crawler(task)  # fetched page body
				self.set_into_deq(content)