#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

'''
	site.50  上海航空假期旅行社              www.sh-holiday.com
	入口： http://www.sh-holiday.com/sales/list_search_result.asp   无分页
'''
def do_crawl():
    try:
    	# - - - - - - - - - - - - - - - - - - - - - - - - 
    	# step 1: 提取所有的线路详细页 url 将部分信息放入
    	# - - - - - - - - - - - - - - - - - - - - - - - - 
		server_url = 'http://www.sh-holiday.com/sales/list_search_result.asp'
		html = urllib2.urlopen(server_url).read().decode('gbk').encode('utf-8')
		
		inland_detail_url_list = []    # 国内详细页 url  
		detail_url_list = re.compile(r'''<span[\s]+class=t_tx_09>[\S]+</span></td><td[\s]+class=d_fx_08><a[\s]+href=show_itinerary.asp[\S]+KI[\d]+>[\S]+</a></td><td[\s]+class=d_fx_08>[\S]+</td><td[\s]+class=d_fx_08>[\S]+</td><td[\s]+class=d_fx_08>[\S]+</td><td[\s]+class=d_fx_08>[\S]+</td></tr><tr[\s]+height=27><td[\s]+class=d_fx_07>[\S]+</td><td[\s]+class=d_fx_08[\s]+colspan=5>[\s]*[\S]+[/][\d]+''', re.X|re.S).findall(html)
		for c in detail_url_list:
			if inland_detail_url_list.__contains__(c) == False:
				inland_detail_url_list.append(c)
		# 所有线路的简要信息，详细信息请求详细页获取，为嵌套的 iframe, 其 src 是js赋值读取不到
		for c in inland_detail_url_list:
			route = Route()
			route.site = '50'
			route.dest = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(5)
			route.rout_url = 'http://www.sh-holiday.com/sales/' + re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(2)
			route.supplier_url = 'http://www.sh-holiday.com/'
			route.title = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(3)
			route.days = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(6)
			route.type = ''
			route.supplier = '上海航空假期旅行社'
			route.telphone = '10105858'
			route.dates = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td></tr><tr[\s]+height=27><td[\s]+class=d_fx_07>[\S]+</td><td[\s]+class=d_fx_08[\s]+colspan=5>([\s]*[\S]+[/][\d]+)''', re.X|re.S).search(c).group(8).replace('&nbsp;', ';')
			route.price = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(1).replace('起', '')
			route.guide = ''
			route.plane = ''
			route.detail = ''
			route.outcity = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(4)
			route.person = ''
			route.go_t = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(7)
			route.back_t = re.compile(r'''<span[\s]+class=t_tx_09>([\S]+)</span></td><td[\s]+class=d_fx_08><a[\s]+href=(show_itinerary.asp[\S]+KI[\d]+)>([\S]+)</a></td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td><td[\s]+class=d_fx_08>([\S]+)</td>''', re.X|re.S).search(c).group(7)
			route.sights = ''
			route.hotel = ''
			route.meal = ''
			route.traffic01 = ''
			route.traffic02 = ''
			route.tickets = ''
			route.payown = ''
			route._img_url = ''
			print route.title
			print '------------------------[上海航空假期旅行社][sh-holiday]' + ' 线路抓取完毕，一共 ' + str(len(inland_detail_url_list)) + ' 条'
			route.updRoute()
			#route.updRoute_Schedules()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
	print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]




if __name__ == '__main__':
	# Run from the script's own directory so relative paths resolve
	# regardless of where the script was invoked from.
	script_path = os.path.dirname(os.path.realpath(__file__))
	os.chdir(script_path)
	do_debug = False


	# Parse command-line options.  Bug fix: long options that take a
	# value must end with '=' — previously '--only-cache' and
	# '--provider' rejected their argument while '-o'/'-p' accepted one.
	try:
		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache=', 'provider='])
	except getopt.GetoptError:
		usage()    # show correct invocation on a bad option, then exit
		sys.exit()

	for opt, arg in opts:
		if opt in ('-h', '--help'):
			usage()
			sys.exit()
		elif opt in ('-d', '--debug'):
			do_debug = True
		elif opt in ('-n', '--no-cache'):
			# NOTE(review): use_cache is assigned but never read in this
			# file — presumably consumed elsewhere; kept for compatibility.
			use_cache = False
		elif opt in ('-o', '--only-cache'):
			# Any value other than no/n/0 (case-insensitive) enables it.
			if arg.lower() in ('no', 'n', '0'):
				only_cache = False
			else:
				only_cache = True
		elif opt in ('-p', '--provider'):
			pass    # provider selection not implemented for this site

	if do_debug:
		import pdb
		pdb.set_trace()    # drop into the debugger just before crawling
	do_crawl()



