#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# Path of this script and the project root (everything up to "resources/").
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# Site index page that links to every route-type listing.
entrance = "http://www.gdcyts.com/travel/index.asp"

# Destination is not published on the page; stored blank on each Route.
route_dest = ""

# NOTE: every pattern below matches HTML with ALL whitespace removed
# (crawl_single normalizes pages with "".join(lines.split())), which is why
# tags look like "<TDheight=10>" rather than "<TD height=10>".

# "Price includes / excludes" block.
route_payown = re.compile("<TR><TDheight=10></TD></TR><TR><TDheight=10>([^<].*?)</TD></TR><TR><TDheight=10></TD></TR>")

route_supplier_url = "http://www.gdcyts.com/"

# Route title; primary pattern and a fallback used by crawl_single.
route_title = re.compile("<FONTsize=5>(.*?)</FONT></H4>")

route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

#route_type =   "普通线路"

# Supplier display name ("Guangdong CYTS").
route_supplier = "广东青旅"

# Contact phone number (label text is Chinese for "Tel:").
route_telphone = re.compile("<FONTcolor=#f79709size=3>电&nbsp;&nbsp;&nbsp;话：(.*?)</FONT>")

# Trip length in days (label: "天数" = number of days).
route_days = re.compile(r"<TD>天数：(\d+)</TD>")

# Online adult price (label: "网上报价(大人)" = online quote, adult).
route_price = re.compile(r'<TDwidth="39%">网上报价\(大人\)：<SPANclass=price>(\d+)</SPAN></TD>')

# Guide name is not published; stored blank.
route_guide = ""

# Departure dates (label: "出发日期").
route_dates = re.compile("出发日期：(.*?)</P><Palign=center>")

# Free-text route description.
route_detail = re.compile("<SMALL><SMALL><SMALL>(.*?)</SMALL></SMALL></SMALL>")

# Departure city (label: "出发城市").
route_out_city = re.compile("<TD>出发城市：(.*?)</TD>")

route_image_url = re.compile('<Palign=left><IMGsrc="(.*?)"')

# Fields not available on this site; placeholder values.
route_person = ""

route_go_t = "-"

route_back_t = "-"

route_sights = re.compile("<STRONG>(.*?)</STRONG>")

# --- Per-day schedule patterns (currently unused: the schedule-parsing
# --- code in crawl_single is commented out). Note schedule_place matches a
# --- background image hosted on 52zql.com, a different site — TODO confirm
# --- these templates ever applied to gdcyts.com pages.
schedule_place = re.compile(
        r"<TDclass=STYLE1background=http://www\.52zql\.com/upfile/2009-1-10/2009011011010979252\.gif>(.*?)</TD>")

# Transport for the day (label: "交通").
schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>'
        )

# Meals for the day (label: "用餐").
schedule_repast = re.compile('<TDalign=rightwidth="8%"bgColor=#f6f6f6>用餐：</TD><TDwidth="13%"bgColor=#f6f6f6>(.*?)</TD>')

# Hotel for the night (label: "住宿").
schedule_hotel = re.compile('住宿：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

# City reached that day (label: "抵达城市").
schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_brief = re.compile("<TRclass=Table-dot><TDclass=nrvAlign=top>(.*?)</TR>")

# One full schedule row: six <TD> cells captured as six groups.
each_schedule = re.compile(
        r"<TR><TDvAlign=topwidth=\d+>(.*?)</TD><TDvAlign=topwidth=\d+>(.*?)</TD><TDvAlign=topwidth=\d+>(.*?)</TD><TDvAlign=topwidth=\d+>(.*?)</TD><TDvAlign=topwidth=\d+>(.*?)</TD><TDvAlign=topwidth=\d+>(.*?)</TD></TR>"
        )

# Generic cell extractor and a tag stripper for cleaning cell contents.
each_item = re.compile("<TD.*?>(.*?)</TD>")

omit_all_html_tags = re.compile("<.*?>")

def get_all_route_list_by_type(url):
    """Return the detail-page URLs of every route listed on *url*.

    *url* is one route-type listing page (route_type.asp?...).

    Bug fix: the original left ``html = ""`` and never downloaded the
    page, so the loop matched nothing and the function always returned
    an empty list. Fetch the page through get_source_from_gb first.
    """
    ret = []
    html = get_source_from_gb(url)

    # Hrefs in the listing are relative to /travel/.
    for match in re.finditer(r'<td><a href="(.*?)">(.*?)</a>\s+</td>', html):
        ret.append("http://www.gdcyts.com/travel/"+match.group(1))
    return ret
def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    """Discover every route detail page on the site.

    Scrapes the index page's JavaScript menu for route-type listing
    links, loads each listing (forcing 1000 records per page so one
    request covers everything), and returns a dict mapping each route
    detail URL to its route-type name.
    """
    index_html = get_source_from_gb(entrance)

    type_pattern = re.compile(
        r'\[0,"(.*?)","","",-1,-1,0,"..(/travel/route_type.asp\?route_type=\d+)"')
    link_pattern = re.compile(r'<td><a href="(.*?)">(.*?)</a>\s+</td>')

    # type name -> listing-page URL (with paging forced wide open)
    type_pages = {}
    for m in type_pattern.finditer(index_html):
        type_pages[m.group(1)] = ("http://www.gdcyts.com" + m.group(2)
                                  + "&pageno=1&page_record_num=1000")

    # detail URL -> type name
    routes = {}
    for type_name, page_url in type_pages.iteritems():
        listing_html = get_source_from_gb(page_url)
        for m in link_pattern.finditer(listing_html):
            routes["http://www.gdcyts.com/travel/" + m.group(1)] = type_name
    return routes

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue
def schedule_bak(html):
    """Fallback per-day schedule parser — currently an unimplemented stub.

    Ignores *html* and always returns an empty list of schedules.
    """
    return []
def crawl_single(url,route_type):
    """Crawl one route detail page at *url* and persist it as a Route.

    *route_type* is the human-readable type name the page was listed
    under. Pages without a recognizable title or price are skipped.
    Relies on the module-level compiled regexes, which expect HTML with
    all whitespace stripped (done below).
    """
    lines = urllib2.urlopen(url,None,120).read()
    html = ""
    # Strip ALL whitespace so the space-free regex templates match, then
    # transcode from the site's GB encoding to UTF-8 (gbk is the fallback
    # superset of gb2312).
    try:
        html = "".join(lines.split()).decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = "".join(lines.split()).decode("gbk").encode("utf-8")

    #print html

    # Title is mandatory: try the main template, then the backup one;
    # give up on this page if neither matches.
    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        m =  route_title_bak.search(html)
        if(m):
            print m.group(1)
        else:
            print  "Crawl Error!Chief Culprit Is " + url
            return
    # Populate the Route record. "54" is presumably this site's id in the
    # shared database — TODO confirm against the Route/DB schema.
    route = Route()
    route.site = "54"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url
    route.type = route_type
    route.title = m.group(1)
    # NOTE(review): attribute is spelled "rout_url" — looks like a typo but
    # presumably matches the Route class definition; verify before renaming.
    route.rout_url = url

    # Optional fields: set only when the corresponding pattern matches.
    m = route_out_city.search(html)
    if(m):
        route.outcity = m.group(1)

    m = route_dates.search(html)
    if(m):
        route.dates = m.group(1)
    m = route_days.search(html)
    if(m):
        route.days = m.group(1)
    m = route_detail.search(html)
    if(m):
        route.detail = m.group(1)

    m = route_telphone.search(html)
    if(m):
        route.telphone = m.group(1)
    # Price is mandatory: skip routes without an online quote.
    m = route_price.search(html)
    if(m):
        route.price = m.group(1)
    else:
        print "None Price.Passed!"
        return

    print route.tostr()

    route.schedules=[]

    # Per-day schedule extraction disabled; the templates below did not
    # match this site's pages (see "Switch To Using Another Template").
#    sch_cnts = each_schedule.findall(html)
#    if(len(sch_cnts) == 0):
#        print "Switch To Using Another Template"
#        return
#    for sch_cnt in sch_cnts:
#        items = each_item.findall(sch_cnt)
#        schedule = Schedule()
#        tmp = 0
#        for item in items:
#            oitem = omit_all_html_tags.sub("",item)
#            if(tmp == 4):
#                schedule.s_traffic = oitem
#            elif(tmp == 2):
#                schedule.s_brief = oitem
#            elif(tmp == 3) :
#                if(oitem.find("早") > -1):
#                    schedule.s_brf = "1"
#                elif(oitem.find("中") > -1):
#                    schedule.s_lunch = "1"
#                elif(oitem.find("晚") > -1):
#                    schedule.s_dinner = "1"
#            elif(tmp == 1):
#                schedule.s_place = oitem
#            route.schedules.append(schedule)
#            tmp = tmp + 1

    # Persist the route (schedule persistence disabled along with parsing).
    route.updRoute()
    #route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
    # Ad-hoc single-function test calls kept for manual debugging:
#    get_all_route_list()
#get_all_route_list_by_type("http://www.gdcyts.com/travel/route_type.asp?route_type=1&pageno=1&page_record_num=1000")
#    crawl_single("http://www.gdcyts.com/travel/route_detail.asp?route_code=FEIJI&member_id=gdql&adv_id=","省内游")
#crawl_single("http://www.zql.yn.cn/route/info41200.html")
    # Run the full crawl; the getopt-based CLI below is disabled.
    do_crawl()
    # Disabled option parsing (-h/--help, -d/--debug, cache flags); the
    # script currently ignores its command-line arguments.
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()


