#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# Path of the directory containing this script, and the project root
# derived by cutting the path off at the "resources" directory.
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# NOTE(review): this entrance URL points at ynklzl.com, but everything
# below crawls ctsqd.com -- looks like a leftover from another crawler;
# confirm before relying on it.
entrance = "http://www.ynklzl.com//Tours/JS/ShowClass_Navigation.js"

# Destination label stored on each Route; left empty for this supplier.
route_dest = ""

# IMPORTANT: every regex below is written WITHOUT spaces between tag
# attributes (e.g. "<DIValign=center>") because the fetched HTML has all
# whitespace stripped via  "".join(html.split())  before matching.

# "报价不含" (price excludes) cell of the detail table.
route_payown = re.compile("<DIValign=center><B>报价不含</B></DIV></TD><TDwidth=654><DIV>(.*?)</DIV></TD>")

# "报价包含" (price includes) cell; group(1) holds the inner <DIV> items.
route_has = re.compile("<DIValign=center>.*?报价.*?包含.*?</DIV></TD><TDvAlign=topwidth=654>(.*?)</TD></TR>")

# Homepage of the supplier whose site is being crawled.
route_supplier_url = "http://www.ctsqd.com/"

# Route title cell on the detail page.
route_title = re.compile('<tdheight="22"colspan="2"align="center"class="tt3">(.*?)</td></tr>')

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

#route_type =   "普通线路"

# Supplier display name ("Qingdao CTS") stored on every Route.
route_supplier = "青岛中旅"

#route_telphone = re.compile("<FONTcolor=#f79709size=3>电&nbsp;&nbsp;&nbsp;话：(.*?)</FONT>")

#route_days = re.compile(r"行程天数：(\d+)天")

# Price in yuan; digits only ("价格：" = "price:").
route_price = re.compile(r"价格：(\d+)")

#route_guide = ""

# Departure dates ("出团日期：" = "departure date:").
route_dates = re.compile('<tdheight="23"class="tt3">出团日期：(.*?)(&nbsp;)*?<br>')

# Free-text itinerary description block.
route_detail = re.compile("<divclass=colorbg>(.*?)</div>")

#route_out_city = re.compile("<TD>出发城市：(.*?)</TD>")

#route_image_url = re.compile('<Palign=left><IMGsrc="(.*?)"')

# Group-size info; not extracted for this supplier.
route_person = ""

# Outbound / return transport placeholders (unknown for this supplier).
route_go_t = "-"

route_back_t = "-"

# Highlighted sight names inside the detail text.
route_sights = re.compile("<STRONG>(.*?)</STRONG>")

#schedule_place = re.compile(r"<TDclass=STYLE1background=http://www\.52zql\.com/upfile/2009-1-10/2009011011010979252\.gif>(.*?)</TD>")

#schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_repast = re.compile('<TDalign=rightwidth="8%"bgColor=#f6f6f6>用餐：</TD><TDwidth="13%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_hotel = re.compile('住宿：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_brief = re.compile("<TRclass=Table-dot><TDclass=nrvAlign=top>(.*?)</TR>")

# One row of the day-by-day schedule table:
# group(1)=day brief, group(2)=hotel, group(3)=meals marker (早/中/晚).
each_schedule = re.compile(r"<TR><TDwidth=48><DIValign=center>D\d+</DIV></TD><TDwidth=48><DIValign=center></DIV></TD><TDvAlign=topwidth=504><DIValign=left>(.*?)</DIV></TD><TDwidth=72><DIValign=center>(.*?)</DIV></TD><TDwidth=60><DIValign=center>(.*?)</DIV></TD></TR>")

#each_item = re.compile("<TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD>")

# Strips any remaining HTML tags from extracted text.
omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    entras = [("国内线路","http://www.ctsqd.com/2guoneiyou.asp"),("大巴线路","http://www.ctsqd.com/4shengneiyou.asp")]
    plist = {}
    for k,v in entras:
        print k,v
        html = get_source_from_gb(v)
        html = "".join(html.split())
        match = re.search(r'</a>&nbsp;\|&nbsp;<ahref=".*?page=(\d+)&id=">末页</a>', html)
        tpage = int(match.group(1))
        htmlall = ""
        #note:only used for tiny website crawl
        for i in [1,tpage]:
            uuurl = v + "?page=" + str(i)
            html = get_source_from_gb(uuurl)
            htmlall += html
        htmlall = "".join(htmlall.split())
        print htmlall
        reobj = re.compile('<tdclass="table-xia2"><ahref="(.*?)">.*?</a></td><tdclass="table-xia2"align="left">(.*?)</td><tdclass="table-xia2"align="left"><spanclass="style3">(.*?)</span>')


        for match in reobj.finditer(htmlall):
            tprice = match.group(3)
            if(tprice.find("电询") > -1):
                print "Price None.Continue.Next"
                continue
            print "http://www.ctsqd.com/"+match.group(1) ,k
            plist["http://www.ctsqd.com/"+match.group(1)] = k
    return  plist

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k +" Type " + v
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url,route_type):
    html = get_source_from_gb(url)
    html = "".join(html.split())
    print html

    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is " + url
        return
    route = Route()
    route.site = "47"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url
    route.type = route_type
    route.title = m.group(1)
    route.rout_url = url
    route.outcity = "青岛"

    m = route_dates.search(html)
    if(m):
        route.dates = m.group(1)

    m = route_has.search(html)
    if(m):
        ttt =  m.group(1)
        ttts = re.compile("<DIV>(.*?)</DIV>").findall(ttt)
        for ar in ttts:
            art =  ar[9:]
            if ar.find('门票') > -1:
                route.tickets = art
            elif ar.find('导游') > -1:
                route.guide = art
            elif ar.find('住宿') > -1:
                route.hotel = art
            elif ar.find('用餐') > -1:
                route.meal = art
            elif ar.find('交通') > -1:
                route.traffic01 = art

    m = route_detail.search(html)
    if(m):
        route.detail = m.group(1)

    m = route_price.search(html)
    if(m):
        route.price = m.group(1)
    else:
        print "None Price.Passed!"
        return

    print route.tostr()

    route.schedules=[]

    sch_cnts = each_schedule.finditer(html)
    num = 1
    for match in sch_cnts:
            schedule = Schedule()
            tmp = 0
            schedule.s_brief = match.group(1)
            schedule.s_hotel = match.group(2)
            oitem = match.group(3)
            if(oitem.find("早") > -1):
                schedule.s_brf = "1"
            elif(oitem.find("中") > -1):
                schedule.s_lunch = "1"
            elif(oitem.find("晚") > -1):
                schedule.s_dinner = "1"
            schedule.s_num = num
            num += 1
            #print schedule.tostr()
            route.schedules.append(schedule)
    route.days = str(len(route.schedules))
    route.updRoute()
    route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
    # Entry point: run the full crawl.  The commented-out calls below are
    # kept as manual smoke tests for single pages, and the commented
    # getopt section is an unused command-line interface skeleton.
#    get_all_route_list()
#get_all_route_list_by_type("http://www.gdcyts.com/travel/route_type.asp?route_type=1&pageno=1&page_record_num=1000")
#    crawl_single("http://www.ctsqd.com/ShowNews-sn.asp?id=697","大巴线路")
#crawl_single("http://www.zql.yn.cn/route/info41200.html")
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()


