#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# Path bookkeeping: curpath is the directory of this script, rootpath is
# everything up to (and excluding) the "resources" directory.
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# Listing entry URL; unused here (get_all_route_list builds its own URLs).
entrance = ""

# Destination label copied onto every Route; left blank for this site.
route_dest = ""

# NOTE: all of the HTML regexes below are written against a page whose
# whitespace has been completely stripped (see crawl_single:
# html = "".join(html.split())), hence patterns like "<SPANid=...".

# "Price excludes: ..." section.
route_payown = re.compile("<b>报价不含：</b><br>(.*?)</P>")

# "Price includes: ..." section.
route_has = re.compile("<b>报价包含：</b><br>(.*?)</P>")

route_supplier_url = "http://www.scyts.com.cn/"

# Route title from the LabRouteName span.
route_title = re.compile("<SPANid=LabRouteName>(.*?)</SPAN>")

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

# Route category taken from the breadcrumb trail ("home >> ... >> type").
route_type_re =   re.compile("网站首页</A>>><A.*?>.+</A>>>(.+?)</div>")

# Fixed supplier name (Shanghai China Youth Travel Service).
route_supplier = "上海中国青年旅行社"

#route_telphone = re.compile("<FONTcolor=#f79709size=3>电&nbsp;&nbsp;&nbsp;话：(.*?)</FONT>")

# Trip length in days ("行程天数：N天").
route_days = re.compile(r"行程天数：(\d+)天")

# Standard group price ("标准团报价：N.00").
route_price = re.compile(r'标准团报价：(\d+)\.00')

route_guide = ""

# Departure dates cell ("出团日期：...").
route_dates = re.compile("出团日期：(.*?)</TD>")

# "Route highlights" free-text block.
route_detail = re.compile('<DIV.*?>线路特色：</DIV><DIVstyle="PADDING-LEFT:6px"class="txt2">(.*?)</DIV>')

#route_out_city = re.compile("<TD>出发城市：(.*?)</TD>")

# Route image URL assigned to img1.src in inline JavaScript.
route_image_url = re.compile(r"img1\.src='(.*?)'")

route_person = ""

# Departure / return transport placeholders (site does not expose them).
route_go_t = "-"

route_back_t = "-"

# "Travel transport" section.
route_traffic = re.compile("<b>旅游交通：</b>(.*?)<P>")

# Highlighted sight names.
route_sights = re.compile("<STRONG>(.*?)</STRONG>")

# URL of the departure-plan iframe, assigned via getElementById("frame1").
route_plane = re.compile(r'document\.getElementById\("frame1"\)\.location\("(.*?)"\)')

# One (month, day) tuple per departure row of the plan table.
route_plane_dates = re.compile(r"</td><td class='dot_line txt3' width='12%'>(\d+)月(\d+)日</td><td class")

# Price per departure row ("￥N.00").
route_plane_prices = re.compile(r"￥(\d+).00")

# "Seats left" cell per departure row; empty means sold out.
route_plane_left = re.compile("width='10%'><b>(.*?)</td>")

# Per-day itinerary fields (matched inside each day's <TABLE> chunk).
schedule_place = re.compile(r"第\d+天</SPAN><TD.*?>(.*?)</TD>")

#schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

# Meals text (contains 早/中/晚 markers for breakfast/lunch/dinner).
schedule_repast = re.compile("<SPANclass=scene_in>(.*?)</SPAN></TD>")

schedule_hotel = re.compile('<SPANclass=scene_instyle="PADDING-TOP:3px">(.*?)</SPAN>')

#schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_brief = re.compile('<TDstyle="PADDING-TOP:10px"vAlign=topalign=left>(.*?)</TD>')

# One match per itinerary day.
each_schedule = re.compile("<TABLEclass=txt2.*?>(.*?)</TABLE>")

#each_item = re.compile("<TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD>")

# Strips any remaining HTML tags from extracted text.
omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    ret = {}
    for pg in range(1,10000):
         entra = "http://www.scyts.com.cn/travel_search_type.asp?vclass=all&vkey=&ltype=&price=&CP=" + str(pg)
         print entra
         html = get_source_from_gb(entra)
         for match in re.compile(r'<a href="(tour\.asp\?xlid=\w+)"  class="index_list" title=".*?">.*?</a>').finditer(html):
             print "http://www.scyts.com.cn/"+ match.group(1)
             ret["http://www.scyts.com.cn/"+ match.group(1)] = "None"

         if re.search(r"images/next_page_act\.gif", html):
             continue
         else:
             print "Next Page Is Null!Break Now!"
             break
    return  ret

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k +" Type " + v
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url,route_type):
    html = get_source_from_gb(url)
    html = "".join(html.split())
    #print html
    route = Route()
    m = route_plane.search(html)
    if(m):
        print "http://www.scyts.com.cn/"+m.group(1)
        thtml = get_source_from_gb("http://www.scyts.com.cn/"+m.group(1))
        #print thtml
        lefts = route_plane_left.findall(thtml)
        datess = route_plane_dates.findall(thtml)
        pricess = route_plane_prices.findall(thtml)
        print "length of lefts,dates,prices:" ,len(lefts),len(datess),len(pricess)
        tc = -1
        tdatee = []
        min = 9999999
        for le in lefts:
            tc = tc + 1
            if(tc == ""):
                print "None Left,Continue."
                continue
            tdatee.append(str(time.localtime().tm_year) + "-" +"-".join(datess[tc]))
            if(int(pricess[tc])!= 0 & int(pricess[tc]) < min):
                 min = pricess[tc]
        if(len(tdatee) == 0):
            print "Warn!This Route has No Avai Plan .Cur Url is "+url
            return
        route.dates = ",".join(tdatee)
        route.price = str(min)
    else:
        print "Warn!Can not Get Route Plane.Cur Url is "+url
        return

    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is " + url
        return

    route.site = "44"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url

    route.title = m.group(1)
    route.rout_url = url
    route.outcity = "上海"

    m = route_type_re.search(html)
    if(m):
        route.type = m.group(1)

    m = route_payown.search(html)
    if(m):
        route.payown = m.group(1)

    m = route_image_url.search(html)
    if(m):
        route.img_url = m.group(1)

    m = route_has.search(html)
    if(m):
        ttt =  m.group(1)
        route.meal = ttt

    m = route_detail.search(html)
    if(m):
        route.detail = m.group(1)

    route.schedules=[]

    sch_cnts = each_schedule.findall(html)
    route.days = str(len(sch_cnts))
    cnum = 0
    for sch_cnt in sch_cnts:
            cnum = cnum + 1
            schedule = Schedule()
            schedule.s_num = str(cnum)
            schedule.s_brief = "".join(schedule_brief.findall(sch_cnt))
            schedule.s_hotel = "".join(schedule_hotel.findall(sch_cnt))
            schedule.s_place = "".join(schedule_place.findall(sch_cnt))
            oitem = "".join(schedule_repast.findall(sch_cnt))
            if(oitem.find("早") > -1):
                schedule.s_brf = "1"
            elif(oitem.find("中") > -1):
                schedule.s_lunch = "1"
            elif(oitem.find("晚") > -1):
                schedule.s_dinner = "1"
            route.schedules.append(schedule)

    route.updRoute()
    route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
    # Run the full crawl.  The commented lines below are alternative entry
    # points (single-route debugging) and a disused getopt-based CLI.
#    get_all_route_list()
#    crawl_single("http://www.scyts.com.cn/tour.asp?xlid=28215628","None")
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()



