#!/usr/bin/python 
# coding=utf-8
import urllib2
import httplib
import socket
import sys
import lxml.etree
import lxml.html
import StringIO
import types
from    video_lib.video_db.movie_queue import movie_queue
import  video_lib.video_db.db_init
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
     DatabaseError, OperationalError, IntegrityError, InternalError, \
     NotSupportedError, ProgrammingError
import time
import datetime
import chardet
import product_get
import re
reload(sys)
sys.setdefaultencoding('utf-8')

"""
   根据产品名称，获取产品列表，插入到视频队列中
"""
std_headers = {
	'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101028 Firefox/3.6.12',
	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
	'Accept-Language': 'en-us,en;q=0.5',
}

class video_page:
	"""Fetch a YouTube search-result page for one product and queue each
	video found on it into the movie_queue table.

	Typical usage:
		vp = video_page()
		vp.set_product(name, product_id)
		if vp.get_page() == 0:
			vp.analysis_page()
	"""

	def __init__(self):
		# Product currently being searched and the URL built from it.
		self.v_product = ""
		self.v_search_url = ""
		# Raw HTML of the fetched search-result page.
		self.v_page = ""
		# Result accumulators (kept for interface compatibility;
		# analysis_page streams rows straight into the DB instead).
		self.v_video_url = []
		self.v_video_title = []
		self.v_video_des = []
		# Maximum number of videos to keep per product.
		self.v_video_limit = 3
		self.v_web_host = "http://www.youtube.com"
		self.v_web_search = "http://www.youtube.com/results?search_query="
		self.v_product_id = -1

	def set_product(self, product, id):
		"""Record the product name/id and build its search URL.

		YouTube expects '+'-separated terms in search_query.
		"""
		self.v_product = product
		self.v_search_url = self.v_web_search + product.replace(" ", "+")
		self.v_product_id = id

	def err(self, message):
		"""Write one error line to stderr."""
		sys.stderr.write("%s\n" % message)

	def get_page(self):
		"""Download the search-result page into self.v_page.

		Returns 0 on success, 1 on any network/HTTP failure.
		"""
		request = urllib2.Request(self.v_search_url, None, std_headers)
		try:
			response = urllib2.urlopen(request)
			try:
				self.v_page = response.read()
			finally:
				# Was leaked before: always release the connection.
				response.close()
		except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
			self.err('ERROR:unable to get [%s]. err=[%s]' % (self.v_search_url, str(err)))
			return 1
		return 0

	def analysis_page(self):
		"""Parse self.v_page and insert every /watch video into movie_queue.

		Duplicate URLs (IntegrityError on commit) are reported and skipped.
		"""
		tree = lxml.etree.HTML(self.v_page)
		# All search hits on the page.
		searchresult = tree.xpath("//div[@class='result-item-main-content']")
		# Each hit is analysed on its own element because the markup of
		# the hits differs; extracting whole columns from the outermost
		# level would let titles/descriptions get out of step with URLs.
		youtubeSession = sessionmaker(bind=video_lib.video_db.db_init.engine)
		watch_pattern = re.compile(r"^/watch")
		for r in searchresult:
			# A missing <p> marks a clustered/aggregated video entry,
			# which would duplicate videos already listed.
			# TODO(review): this heuristic may drop good videos.
			if r.find("p") is None:
				continue
			url = r.find("h3/a").attrib.get('href')
			# Only plain "/watch..." URLs are handled for now.
			if not watch_pattern.search(url):
				continue
			# h3/a@title is the video title.  A missing attribute used
			# to become the literal string "None" (defeating the empty
			# fallback below), so normalise None to "".
			title = unicode(r.find("h3/a").attrib.get('title') or "").encode("utf-8")
			# The <p> text is the description (see the lxml API docs on
			# lxml.etree._Element for the other accessors).
			des = unicode(r.find("p").text).encode("utf-8")
			if title == "":
				title = self.v_product
			p = movie_queue()
			p.parser_youtube_set(unicode(self.v_product).encode("utf-8"),
					     self.v_web_host + url,
					     title,
					     des,
					     self.v_product_id)
			session_analysis = youtubeSession()
			try:
				session_analysis.add(p)
				session_analysis.commit()
			except sqlalchemy.exc.IntegrityError:
				# Duplicate URL: roll the session back so it is clean.
				session_analysis.rollback()
				sys.stdout.write("[dup]:\t %s\n" % url)
				continue
			finally:
				# Was leaked on duplicates: always release the session.
				session_analysis.close()
			sys.stdout.write("%s %s %s\n" % (url, title, des))

"""
		all	 		= 0;
		if  (len(self.v_video_url) < self.v_video_limit):
			all 	=	len(self.v_video_url)
		else:
			all 	=	self.v_video_limit
		i		=	0;
	#create session
		youtubeSession 		= sessionmaker(bind= video_lib.video_db.db_init.engine)
		session_analysis	= youtubeSession();
		print all
		while ( i < all ):
			print self.v_web_host+self.v_video_url[i]
			#print unicode(self.v_video_des[i].text).encode("utf-8");
			#print unicode(self.v_video_title[i]).encode("utf-8");
			#p = movie_queue();
			#print unicode(self.v_video_title[i]).encode("utf-8") ,unicode(self.v_video_des[i].text).encode("utf-8")
			#p.parser_youtube_set(unicode(self.v_product).encode("utf-8"),\
			#		             self.v_web_host+self.v_video_url[i],\
			#		     unicode(self.v_video_title[i]).encode("utf-8"),\
			#		     unicode(self.v_video_des[i].text).encode("utf-8"));
	#add table object into  session
			#session_analysis.add(p);
	
			i+=1;
	#session commit
		#session_analysis.commit();
"""
if __name__ == '__main__':
	pp = product_get.product();
	if not pp.send_query():
		sys.exit();
	youtube = video_page();
	youtube.v_video_limit	= 100
	#youtube.set_product("G-Shock      GD-100-1ACR");
	#youtube.set_product("suunto core all black review,hd");
	#youtube.set_product("panasonic gh2")
	print pp.get_product_name()
	youtube.set_product(pp.get_product_name(),pp.get_product_id())
	youtube.get_page();
	youtube.analysis_page();
	pp.handle_product_finish();
