#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

'''
	site.49  港中旅国际旅行社                http://www.ctiol.com/
	国内游线路入口（目前只有北京出发的）：    http://www.ctiol.com/route_news/
	
	通过添加 start=0 参数获取线路翻页信息
'''
def do_crawl():
    try:
    	# - - - - - - - - - - - - - - - - - - - - - - - - 
    	# step 1: 提取所有的线路详细页 url (start=0 开始，按 6 递增，直到娶不到数据)
    	# - - - - - - - - - - - - - - - - - - - - - - - - 
		server_url = 'http://www.ctiol.com/route_news/'
		inland_detail_url_list = []    # 国内详细页 url
		# TODO
		start = 0
		while True:
			html = urllib2.urlopen(server_url + 'cn_tour.php?start=' + str(start)).read().decode('GB18030').encode('utf-8')
			#html = html.decode('utf-8')
			detail_url_list = re.compile(r'''detail_route.php[\S]+[\d]+''', re.X|re.S).findall(html)
			start += 6
			if len(detail_url_list) == 0:
				break
			else:
				for _d in detail_url_list:
					if inland_detail_url_list.__contains__(_d) == False:
						inland_detail_url_list.append(_d)
		# - - - - - - - - - - - - - - - - - - - - - - - - 
		# step 2: 提取详细页的信息放入 route
		# - - - - - - - - - - - - - - - - - - - - - - - - 
		#inland_detail_url_list = []
		#inland_detail_url_list.append('detail_route.php?id=3656');
		for _u in inland_detail_url_list:
			_u = server_url + _u
			html = urllib2.urlopen(_u).read().decode('GB18030').encode('utf-8')
			# table: 出团日期、成人价、儿童价、价格有效期、航空公司、游览城市
			route = Route()
			route.site = '49'           
			route.dest = re.compile(r'''<td[\s]+height="26"[\s]+bgcolor="f6f6f6">[\s]+<span[\s]+class="f14">([\S\s]+)</span></td>''', re.X|re.S|re.I).search(html).group(1)             
			route.rout_url = _u         
			route.supplier_url = 'http://www.ctiol.com/'
			route.title = re.compile(r'''<TITLE>([\S\s]+)</TITLE>''', re.X|re.S).search(html).group(1).replace('港中旅国际在线 -', '').replace('-独家推出', '').replace('-线路介绍', '').strip()
			route.days = len(html.split('<span class="title1">'))-1
			route.supplier = u'港中旅国际旅行社'
			route.telphone = '51379999'
			# dates
			_dates_m = re.compile(r'''<td[\s]+width="84[%]"[\s]+height="26"[\s]+bgcolor="f6f6f6">[\s]*<font[\s]+color="[#]000000">[\s]*([\S]+)[\s]*</font>[\s]*</td>''', re.X|re.S).search(html)
			if(_dates_m):
				route.dates = _dates_m.group(1)
			# price
			_price_m = re.compile(r'''<td[\s]+height="26"[\s]+bgcolor="f6f6f6">[\s]*<font[\s]+color="[#]000000">[\s]*<span[\s]+class="f14">([\S\s]+)</span></font></td>''', re.X|re.S).search(html)
			if(_price_m):
				route.price = _price_m.group(1).split('</span></font></td>')[0].replace('(人民币)', '')
			# detail
			_detail_m = re.compile(r'''<td[\s]+colspan="2"[\s]+bgcolor="[\S]+">[\s]+<font[\s]+color="[\S]+">([\S\s]+)</font></td>''', re.X|re.S|re.I).search(html)
			if(_detail_m):
				route.detail = _detail_m.group(1).split('</font>')[0]
			route.outcity = u'北京'
			# sights
			_sights_m = re.compile(r'''<meta[\s]+name="description"[\s]+content="([\S\s]+)">[\s]+<META''', re.X|re.S|re.I).search(html)
			if(_sights_m):
				route.sights = _sights_m.group(1)
			# pay
			_payown_m = re.compile(r'''<td[\s]+colspan="2"[\s]+bgcolor="[#]FFFCF0">[\s]+<font[\s]+color="[#]000000">[\s]+<span[\s]+class="f14">([\S\s]+)</span></font></td>''', re.X|re.S|re.I).search(html)
			if(_payown_m):
				route.payown = _payown_m.group(1)
			# 一条线路有多个行程
			route.schedules = []
			print route.title
			#_schedules_title_list = re.compile(r'''<table[\s]+width="100%"[\s]+height="20"[\s]+border="0"[\s]+align="center"[\s]+cellpadding="0"[\s]+cellspacing="0"[\s]+background="images/bg.gif">[\s]+<tr>[\s]+<td[\s]+width="396"[\s]+height="16"[\s]+bgcolor="[#]FFCC66"><font[\s]color="[#]000000"><span[\s]class="title1"><strong>[\S]+[\s]+[\S]+[\S]+[\s]+[\S]+''', re.X|re.S).findall(html)
			#for c in _schedules_title_list:
				#u = util.strutil(c)
				#_schedules_title = u.filter_tags()
			# 行程简介
			_schedules_brief_html = ''
			_schedules_brief_html_m = re.compile(r'''<table[\s]width="100%"[\s]border="0"[\s]align="center"[\s]cellpadding="0"[\s]cellspacing="0">([\S\s]+)<span[\s]class="f14">''', re.X|re.S).search(html)
			if(_schedules_brief_html_m):
				_schedules_brief_html = _schedules_brief_html_m.group().replace('<span class="text-2">', '#$%^').replace('</span><br>', '&&&&&')
			u = util.strutil(_schedules_brief_html)
			_schedules_brief_html = u.filter_tags()
			for c in _schedules_brief_html.split('#$%^'):
				route.schedules = []
				_schedule = Schedule()
				_schedule.s_num = str(len(_schedules_brief_html.split('#$%^')))
				_schedule.s_brief = c.split('&&&&&')[0].strip()
				route.schedules.append(_schedule)
				#print '    ' + _schedule.s_brief
				#print '-------------------------------'
				#_schedule.s_hotel = c[c.find(u'住宿')+3:c.find(u'住宿')+100].strip()
		print u'------------------------crawl [ctiol] ' + u' route done，size: ' + str(len(inland_detail_url_list))
		route.updRoute()
		route.updRoute_Schedules()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
	print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]




if __name__ == '__main__':
	# Run relative to this script's own directory so relative paths resolve.
	script_path = os.path.dirname(os.path.realpath(__file__))
	os.chdir(script_path)
	do_debug = False

	# Parse command-line options.
	try:
		# FIX: long options that take a value need a trailing '=' in the
		# getopt spec (the short spec already declares 'o:' and 'p:');
		# without it, --only-cache=... and --provider=... raise GetoptError.
		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache=', 'provider='])
	except getopt.GetoptError:
		usage()    # print usage on a bad option, then exit
		sys.exit()

	for opt, arg in opts:
		if opt in ('-h', '--help'):
			usage()
			sys.exit()
		elif opt in ('-d', '--debug'):
			do_debug = True
		elif opt in ('-n', '--no-cache'):
			use_cache = False
		elif opt in ('-o', '--only-cache'):
			# Any value other than no/n/0 enables only-cache mode.
			only_cache = arg.lower() not in ('no', 'n', '0')
		elif opt in ('-p', '--provider'):
			pass    # provider filtering not implemented yet

	if do_debug:
		import pdb
		pdb.set_trace()
	do_crawl()



