#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# Directory containing this script, and the project root (everything above
# the "resources" directory), each with a trailing path separator.
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# Crawl entry point: the site map page that links every city's route list.
entrance = "http://www.sztravel.com.cn/last/map.aspx"

# Site identifier stored on every Route record produced by this crawler.
route_site_id = "43"

# Destination is not scraped from this site; stored empty.
route_dest = ""

# NOTE: the HTML patterns below contain no spaces between tags/attributes
# because the fetched page source is whitespace-stripped with
# "".join(html.split()) before any matching is done.

# "Fees NOT included" section body (费用不含).
route_payown = re.compile('<ahref="#notgo">费用不含</a></span></dt><ddclass="hide">(.*?)</dd>')

# "Fees included" section body (费用包含).
route_has = re.compile('<ahref="#notgo">费用包含</a></span></dt><ddclass="hide">(.*?)</dd>')

# Supplier homepage and display name (fixed for this crawler).
route_supplier_url = "http://www.sztravel.com.cn/"

route_supplier = "深圳市口岸中旅"

# Route title from the page's <h2> heading.
route_title = re.compile("<h2>(.*?)</h2>")

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

# Route category, taken from the active navigation tab.
route_type_re =   re.compile('<liclass="on"><ahref=".*?">(.*?)</a></li>')

# Contact phone number shown in an <h5> element.
route_telphone = re.compile(r"<h5>(\S+?)</h5>")

# Trip length in days ("行程天数: N天").
route_days = re.compile(r"行程天数：<span><b>(\d+)天</b>")

# Headline price figure.
route_price = re.compile(r'<divclass="jbxxjiage4">(\d+)</div>')

#route_date_prices = re.compile(r'<tdalign="center">(\d{4}-\d{2}-\d{2})</td><tdclass="priceList"align="center">(\d+)元</td>')

# No guide information on this site; stored empty.
route_guide = ""

# Departure schedule summary ("出发班期").
route_dates = re.compile("出发班期：<span><b>(.*?)</b>")

# "Route highlights" section body (路线特色).
route_detail = re.compile('<ahref="#notgo">路线特色</a></span></dt><dd>(.*?)</dd>')

# Departure city ("出发城市").
route_out_city = re.compile('<divclass="jbxx3">出发城市：<span><b>(.*?)</b></span>')

# First image path from the pics="..|..|.." attribute (relative, after "..").
route_image_url = re.compile(r'pics="\.\.(.*?)\|')

# Group size not available; stored empty.
route_person = ""

# Outbound/return times not available; stored as "-".
route_go_t = "-"

route_back_t = "-"

# Transportation description ("旅游交通").
route_traffic = re.compile("<b>旅游交通：</b>(.*?)<P>")

# Sight names are wrapped in full-width brackets 【...】 in the page text.
route_sights = re.compile("【(.*?)】")

#### planned departure dates ###

# One departure-plan table row: (month, day, seats_left, price).
route_plane = re.compile(
        r"<tr><td>\d+</td><td>\w+</td><td>(\d+)月(\d+)日</td><td>.*?</td><td>(\d+)</td><td>RMB(.*?)</td><td>.*?</td><td>.*?</td><td>.*?</td></tr>"
        )

#route_plane_dates = re.compile(r"</td><td class='dot_line txt3' width='12%'>(\d+)月(\d+)日</td><td class")

#route_plane_prices = re.compile(r"￥(\d+).00")

#route_plane_left = re.compile("width='10%'><b>(.*?)</td>")

#schedule_place = re.compile("<p>(.*?)<br/>")

#schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_repast_hotel = re.compile(".*<br/>(.*?)</p>")

#schedule_hotel = re.compile('<SPANclass=scene_instyle="PADDING-TOP:3px">(.*?)</SPAN>')

#schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_brief = re.compile("<br/>(.*?)<")

# One itinerary day: captures (traffic line, meals line, brief description).
each_schedule = re.compile(
        '<divclass="itemsclearfix.*?"><divclass="bfx-show">.*?</div><p>(.*?)</p><p>(.*?)</p><p>(.*?)</p></div>')

#each_item = re.compile("<TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD>")

# Used to strip every HTML tag from a captured fragment.
omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_source_by_post(url,data):
    html = ""
    try:
        html = urllib2.urlopen(url,data,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,data,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    """Discover every route detail-page URL on the site.

    Walks the city links on the entrance map page, then pages through each
    city's route list until the last-page marker is seen.  Returns a dict
    mapping detail-page URL -> "nil" (route type unknown at this stage;
    crawl_single() scrapes the type from the page when it sees "nil").
    """
    ret = {}
    html = urllib2.urlopen(entrance,None,120).read()
    # City links look like href="..<path>?...id=N&ctype=M"; capture the part
    # after ".." so it can be re-rooted onto the site host below.
    citys = re.compile(r'<a href="\.\.(.*?\d+&ctype=\d+)">.*?</a>').findall(html)
    for city in citys:
        # Hard upper bound on pages; the loop normally breaks much earlier.
        for pg in range(1,10000):
            print "http://www.sztravel.com.cn" + city + "&pageNo=" + str(pg)
            html = urllib2.urlopen("http://www.sztravel.com.cn" + city + "&pageNo=" + str(pg),None,120).read()
            # Strip ALL whitespace so the space-less patterns match.
            html = "".join(html.split())
            for match in re.compile('<span><atarget="_blank"href="(.*?)">.*?</a></span>').finditer(html):
                print "http://www.sztravel.com.cn/travel/" + match.group(1)
                ret["http://www.sztravel.com.cn/travel/" + match.group(1)] = "nil"
            # Stop when the pager shows the disabled "next page/last page"
            # text of the final page, or when there is no "last page" link
            # at all (single-page listing).
            if(re.search("&nbsp;下一页&nbsp;末页", html) or (not re.search("末页", html))):
                print "Last Page.Begin To Break Now!"
                break
    return  ret

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k +" Type " + v
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url,route_type):
    html = urllib2.urlopen(url,None,120).read()
    html = "".join(html.split())
    #print html
    route = Route()
    planes = route_plane.findall(html)
    tdatee = []
    min = 9999999
    for plann in planes:
        if(plann[2] == "0"):
            print "None Left,Continue."
            continue
        tpri = plann[3].replace(",","")
        tdatee.append(str(time.localtime().tm_year) + "-" +"-".join([plann[0],plann[1]]))
        if(int(tpri)!= 0 & int(tpri) < min):
            min = int(tpri)
    if(len(tdatee) == 0):
        print "Warn!This Route has No Avai Plan .Cur Url is "+url
        return
    print ",".join(tdatee),min
    route.dates = ",".join(tdatee)
    route.price = str(min)
    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is " + url
        return

    route.site = route_site_id
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url

    route.title = m.group(1)
    route.rout_url = url
    route.outcity = "北京"

    if(route_type == "nil"):
        m = route_type_re.search(html)
        if(m):
            route.type = m.group(1)
    else:
        route.type = route_type

    m = route_payown.search(html)
    if(m):
        route.payown = m.group(1)

    m = route_has.search(html)
    if(m):
        route.meal = m.group(1)
    route.sights = ",".join(route_sights.findall(html))


    m = route_image_url.search(html)
    if(m):
        route.img_url = "http://www.sztravel.com.cn"+ m.group(1)

    route.outcity = "深圳"

    m = route_days.search(html)
    if(m):
        route.days = m.group(1)
    m = route_detail.search(html)
    if(m):
        route.detail = omit_all_html_tags.sub("",m.group(1))

    route.telphone = "0755-25933999"

    route.schedules=[]

    sch_cnts = each_schedule.findall(html)
    route.days = str(len(sch_cnts))
    cnum = 0
    for sch_cnt in sch_cnts:
            cnum = cnum + 1
            schedule = Schedule()
            schedule.s_num = str(cnum)
            #print oitem
            if(sch_cnt[1].find("<span>早餐：</span>有") > -1):
                schedule.s_brf = "1"
            if(sch_cnt[1].find("<span>午餐：</span>有") > -1):
                schedule.s_lunch = "1"
            if(sch_cnt[1].find("<span>晚餐：</span>有") > -1):
                schedule.s_dinner = "1"
            if(sch_cnt[1].find("住宿") > -1):
                schedule.s_hotel = sch_cnt[1][sch_cnt[1].find("住宿")+16:]
            schedule.s_brief = sch_cnt[2]
            schedule.s_traffic = sch_cnt[0]
            route.schedules.append(schedule)

    route.updRoute()
    route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
# Manual test entry points, kept for debugging:
#    get_all_route_list()
#    crawl_single("http://www.sztravel.com.cn/travel/detail.aspx?xlid=12440&ctype=1","nil")
    # Run the full crawl unconditionally; the getopt-based CLI below is
    # commented out and currently unused.
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()



