#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# --- Path setup -------------------------------------------------------------
# curpath: directory containing this script; rootpath: the prefix up to the
# "resources" directory (assumes the script lives under .../resources/...).
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# --- Site constants and scraping patterns -----------------------------------
# NOTE: fetched HTML is collapsed with "".join(html.split()) before matching,
# which is why many patterns below contain no whitespace inside tags
# (e.g. <tdalign="center"> rather than <td align="center">).

entrance = ""

# Destination placeholder (not set anywhere in the visible code).
route_dest = ""

# "Price does not include: ..." paragraph.
route_payown = re.compile("<strong>价格不含：</strong><p>(.*?)</p>")

# "Price includes: ..." paragraph.
route_has = re.compile("<strong>价格包含：</strong><p>(.*?)</p>")

route_supplier_url = "http://www.xingzhilv.com/"

# Supplier display name (Beijing Zhongguang International Travel Agency).
route_supplier = "北京中广国际旅行社"

# Page <title> up to the fixed site suffix.
route_title = re.compile(r"<title>(.*?)\|北京中广国际旅行社-星之旅</title>")

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

#route_type_re =   re.compile("网站首页</A>>><A.*?>.+</A>>>(.+?)</div>")

# Contact phone number inside an <h5> element.
route_telphone = re.compile(r"<h5>(\S+?)</h5>")

#route_days = re.compile(r"行程天数：(\d+)天")

#route_price = re.compile(r'标准团报价：(\d+)\.00')

# (date, price) pairs from the whitespace-stripped departure/price table.
route_date_prices = re.compile(r'<tdalign="center">(\d{4}-\d{2}-\d{2})</td><tdclass="priceList"align="center">(\d+)元</td>')

route_guide = ""

#route_dates = re.compile("出团日期：(.*?)</TD>")

# "Route highlights" description block.
route_detail = re.compile('<DIV.*?>线路特色：</DIV><DIVstyle="PADDING-LEFT:6px"class="txt2">(.*?)</DIV>')

#route_out_city = re.compile("<TD>出发城市：(.*?)</TD>")

# Route image URL assigned via inline JavaScript (img1.src='...').
route_image_url = re.compile(r"img1\.src='(.*?)'")

route_person = ""

# Outbound / return transport placeholders ("-" means unknown).
route_go_t = "-"

route_back_t = "-"

# "Travel transport: ..." snippet.
route_traffic = re.compile("<b>旅游交通：</b>(.*?)<P>")

route_sights = re.compile("<STRONG>(.*?)</STRONG>")

#route_plane = re.compile(r'document\.getElementById\("frame1"\)\.location\("(.*?)"\)')

#route_plane_dates = re.compile(r"</td><td class='dot_line txt3' width='12%'>(\d+)月(\d+)日</td><td class")

#route_plane_prices = re.compile(r"￥(\d+).00")

#route_plane_left = re.compile("width='10%'><b>(.*?)</td>")

# Per-day schedule fragments (place / meals+hotel / brief text).
schedule_place = re.compile("<p>(.*?)<br/>")

#schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_repast_hotel = re.compile(".*<br/>(.*?)</p>")

schedule_hotel = re.compile('<SPANclass=scene_instyle="PADDING-TOP:3px">(.*?)</SPAN>')

#schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_brief = re.compile("<br/>(.*?)<")

# One itinerary row: Dn marker followed by (description, meals, hotel) cells.
each_schedule = re.compile(r"D\d+</FONT></B></DIV></TD><TDwidth=455><DIV><FONTsize=2>(.*?)</FONT></DIV></TD><TDwidth=91><DIValign=center><FONTsize=2>(.*?)</FONT></DIV></TD><TDwidth=98><DIValign=center><FONTsize=2>(.*?)</FONT></DIV></TD></TR>")

#each_item = re.compile("<TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD>")

# Strips any remaining HTML tags from extracted text.
omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_source_by_post(url,data):
    html = ""
    try:
        html = urllib2.urlopen(url,data,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,data,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    entras = {"出境旅游":"14", "特价线路":"16","特殊线路":"17","北京旅游":"18","周边旅游":"19", "京郊旅游":"20","国内旅游":"15"}
    post_tmp = "s10=#tid&s20=&s30=&keyx=&submit=%BF%AA%CA%BC%B2%E9%D1%AF"
    ret = {}
    for k,v in entras.iteritems():
        html = get_source_by_post("http://www.xingzhilv.com/xl_search.asp",post_tmp.replace("#tid",v,1))
        html = "".join(html.split())
        print html

        for match in re.compile('<tdheight="30">.*?</td><td><ahref="(.*?)">.*?</a>').finditer(html):
            print "http://www.xingzhilv.com/"+ match.group(1)
            ret["http://www.xingzhilv.com/"+ match.group(1)] = k
    return  ret

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k +" Type " + v
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url,route_type):
    """Crawl one route detail page at *url* tagged with *route_type*.

    Currently disabled: the site stopped exposing price/date data, so this
    is a stub that only prints a notice.  The commented-out body below is
    the previous working implementation (parse prices/dates, title, price
    inclusions/exclusions, detail text and the per-day schedule table into
    a Route object) kept for reference in case the data comes back.
    """
    print "Can Not Crawl At All.No Price.No Date"
#    html = get_source_from_gb(url)
#    html = "".join(html.split())
#    print html
#    route = Route()
#    tdatee = []
#    min = 999999999
#    for match in route_date_prices.finditer(html):
#        tdatee.append(match.group(1))
#        if(min > int(match.group(2))):
#            min = int(match.group(2))
#    if(min == 999999999 | min == 0) :
#        print "Can not Get Price Info.Begin to Return.URL is "+ url
#        return
#    route.dates = ",".join(tdatee)
#    route.price = str(min)
#
#
#    m = route_title.search(html)
#    if(m):
#        print  m.group(1)
#    else:
#        print  "Crawl Error!Chief Culprit Is " + url
#        return
#
#    route.site = "32"
#    route.dest = route_dest
#    route.supplier = route_supplier
#    route.supplier_url = route_supplier_url
#
#    route.title = m.group(1)
#    route.rout_url = url
#    route.outcity = "北京"
#
#    route.type = route_type
#
#    m = route_payown.search(html)
#    if(m):
#        route.payown = m.group(1)
#
#    m = route_has.search(html)
#    if(m):
#        ttt =  m.group(1)
#        route.meal = ttt
#
#    m = route_detail.search(html)
#    if(m):
#        route.detail = m.group(1)
#
#
#    route.schedules=[]

#    sch_cnts = each_schedule.findall(html)
#    route.days = str(len(sch_cnts))
#    cnum = 0
#    for sch_cnt in sch_cnts:
#            print sch_cnt
#            cnum = cnum + 1
#            schedule = Schedule()
#            schedule.s_num = str(cnum)
#            schedule.s_brief = "".join(schedule_brief.findall(sch_cnt))
#            schedule.s_hotel = "".join(schedule_hotel.findall(sch_cnt))
#            schedule.s_place = "".join(schedule_place.findall(sch_cnt))
#            oitem = "".join(schedule_repast_hotel.findall(sch_cnt))
#            #print oitem
#            if(oitem.find("早餐:含") > -1):
#                schedule.s_brf = "1"
#            if(oitem.find("午餐:含") > -1):
#                schedule.s_lunch = "1"
#            if(oitem.find("晚餐:含") > -1):
#                schedule.s_dinner = "1"
#            if(oitem.find("住宿:") > -1):
#                schedule.s_hotel = oitem[oitem.find("住宿:")+7:]
#            if(schedule.s_brief == ""):
#                schedule.s_brief = sch_cnt
#            route.schedules.append(schedule)

    #route.updRoute()
    #route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
    # Currently hard-wired to crawl a single route page for debugging;
    # the commented-out lines below are the full CLI entry point
    # (option parsing, optional pdb, then do_crawl()).
#    get_all_route_list()
    crawl_single("http://www.xingzhilv.com/xl_con.asp?id=217","境外旅游")
#    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()



