#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# Path bookkeeping: directory holding this script, and the project root
# (everything up to the "resources" directory in that path).
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# Navigation JS page that lists every route category on the target site.
entrance = "http://www.ynklzl.com//Tours/JS/ShowClass_Navigation.js"

# Destination is not extracted for this supplier; left empty.
route_dest = ""

# NOTE: all patterns below are matched against HTML from which EVERY
# whitespace character has been stripped (crawl_single does
# "".join(html.split())), hence forms like "<DIVid=..." with no spaces.

# "Fees NOT included" section (费用不含).
route_payown = re.compile("<DIVid=line_content_bar>费用不含</DIV><DIV.*?>(.*?)</DIV>")

# "Fees included" section (费用包含); items inside are <BR>-separated.
route_has = re.compile("<DIVid=line_content_bar>费用包含</DIV><DIVclass=colorbg>(.*?)</DIV>")

# Base URL of the supplier's site.
route_supplier_url = "http://www.ynklzl.com/"

# Route title is taken from the page <title>.
route_title = re.compile("<title>(.*?)</title>")

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

#route_type =   "普通线路"

# Fixed supplier display name (Kunming CITS).
route_supplier = "昆明国旅"

#route_telphone = re.compile("<FONTcolor=#f79709size=3>电&nbsp;&nbsp;&nbsp;话：(.*?)</FONT>")

# Trip length in days (行程天数：N天).
route_days = re.compile(r"行程天数：(\d+)天")

# Standard group price (标准团报价), integer part only.
route_price = re.compile(r'标准团报价：(\d+)\.00')

# No per-route guide info on this site; left empty.
route_guide = ""

# Departure dates text (出团日期).
route_dates = re.compile("出团日期：(.*?)</TD>")

# Free-form route description body.
route_detail = re.compile("<divclass=colorbg>(.*?)</div>")

#route_out_city = re.compile("<TD>出发城市：(.*?)</TD>")

#route_image_url = re.compile('<Palign=left><IMGsrc="(.*?)"')

# Min group size not available; left empty.
route_person = ""

# Outbound / return transport unknown for this supplier; placeholder "-".
route_go_t = "-"

route_back_t = "-"

# Sight names appear in <STRONG> tags.
route_sights = re.compile("<STRONG>(.*?)</STRONG>")

# Per-day schedule cell patterns (legacy layout from the 52zql.com template).
schedule_place = re.compile(
        r"<TDclass=STYLE1background=http://www\.52zql\.com/upfile/2009-1-10/2009011011010979252\.gif>(.*?)</TD>")

# Transport cell (交通).
schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>'
        )

# Meals cell (用餐).
schedule_repast = re.compile('<TDalign=rightwidth="8%"bgColor=#f6f6f6>用餐：</TD><TDwidth="13%"bgColor=#f6f6f6>(.*?)</TD>')

# Hotel cell (住宿).
schedule_hotel = re.compile('住宿：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

# Arrival-city cell (抵达城市).
schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

# One day's free-text description row.
schedule_brief = re.compile("<TRclass=Table-dot><TDclass=nrvAlign=top>(.*?)</TR>")

# One schedule table row (marked by background color #f0f8ff).
each_schedule = re.compile("<TR.*?#f0f8ff.*?>(.*?)</TR>")

# The five <TD> cells of a schedule row:
# (day label, traffic, description, meals, hotel) -- see crawl_single.
each_item = re.compile("<TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD>")

# Strips any remaining HTML tags from extracted text.
omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    """Discover every route detail page on the site.

    Walks the navigation JS for category links, then each category's
    listing page for route links.  Returns a dict mapping absolute
    detail-page URL -> category name.
    """
    nav_html = get_source_from_gb(entrance)

    # Patterns hoisted out of the loops; strings identical to the originals.
    category_pat = re.compile("<a class='LinkNavigation' href='(.*?)'  target='_self'>(.*?)</a>")
    item_pat = re.compile(
            r'<LI><A href="(.*?)" target=_blank><SPAN class=linetitle>.*?</SPAN></A><SPAN class=price>.*?</SPAN></LI>')

    # category name -> absolute listing-page URL
    categories = {}
    for hit in category_pat.finditer(nav_html):
        categories[hit.group(2)] = "http://www.ynklzl.com" + hit.group(1)

    routes = {}
    for name, listing_url in categories.iteritems():
        listing_html = get_source_from_gb(listing_url)
        for hit in item_pat.finditer(listing_html):
            routes["http://www.ynklzl.com" + hit.group(1)] = name
    return routes

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k +" Type " + v
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url,route_type):
    html = get_source_from_gb(url)
    html = "".join(html.split())
    #print html

    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is " + url
        return
    route = Route()
    route.site = "46"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url
    route.type = route_type
    route.title = m.group(1)
    route.rout_url = url
    route.outcity = "昆明"

    m = route_dates.search(html)
    if(m):
        route.dates = m.group(1)

    m = route_has.search(html)
    if(m):
        ttt =  m.group(1)
        arr = ttt.split("<BR>")
        for ar in arr:
            art =  ar[4:]
            if ar.find('景点') > -1:
                route.tickets = art
            elif ar.find('导游') > -1:
                route.guide = art
            elif ar.find('酒店') > -1:
                route.hotel = art
            elif ar.find('餐') > -1:
                route.meal = art
            elif ar.find('车') > -1:
                route.traffic01 = art

    m = route_days.search(html)
    if(m):
        route.days = m.group(1)
    m = route_detail.search(html)
    if(m):
        route.detail = m.group(1)

    m = route_price.search(html)
    if(m):
        route.price = m.group(1)
    else:
        print "None Price.Passed!"
        return

    print route.tostr()

    route.schedules=[]

    sch_cnts = each_schedule.findall(html)
    for sch_cnt in sch_cnts:
        num = 1
        for match in each_item.finditer(sch_cnt):
            schedule = Schedule()
            tmp = 0
            schedule.s_traffic = match.group(2)
            schedule.s_brief = omit_all_html_tags.sub("",match.group(3))
            schedule.s_hotel = match.group(5)
            oitem = match.group(4)
            if(oitem.find("早") > -1):
                schedule.s_brf = "1"
            elif(oitem.find("中") > -1):
                schedule.s_lunch = "1"
            elif(oitem.find("晚") > -1):
                schedule.s_dinner = "1"
            #print schedule.tostr()
            schedule.s_num = num
            num += 1
            route.schedules.append(schedule)

    route.updRoute()
    route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
# The commented-out lines below are historical single-route test entry
# points and an unfinished getopt-based CLI; only do_crawl() is live.
#    get_all_route_list()
#get_all_route_list_by_type("http://www.gdcyts.com/travel/route_type.asp?route_type=1&pageno=1&page_record_num=1000")
#    crawl_single("http://www.ynklzl.com/Tours/2634.html","丽江-泸沽湖")
#crawl_single("http://www.zql.yn.cn/route/info41200.html")
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()



