#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# --- path setup -------------------------------------------------------------
# curpath: directory this script is run from; rootpath: everything up to the
# "resources" path segment (crawl output is written under rootpath).
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# Landing page that links to the per-category route listing pages.
entrance = "http://www.zql.yn.cn/route.html"

# Destination recorded on every crawled route ("Kunming").
route_dest = "昆明"

# NOTE: every pattern below is matched against HTML from which ALL whitespace
# has been stripped (see crawl_single), hence tokens like "<TDheight=10>".
# Cells describing what the price does / does not include.
route_payown = re.compile("<TR><TDheight=10></TD></TR><TR><TDheight=10>([^<].*?)</TD></TR><TR><TDheight=10></TD></TR>")

# Base URL of the supplier's site, used to absolutise relative links.
route_supplier_url = "http://www.zql.yn.cn/"

# Route title cell.
route_title = re.compile("width=17align=absMiddle>(.*?)</TD>")

# Route type label ("ordinary route").
route_type =   "普通线路"

# Supplier name ("Yunnan China Youth International Travel Service").
route_supplier = "云南中青国际旅行社"

# Contact telephone number(s).
route_telphone = re.compile(r"<Pclass=hhhhhhhhhh1>([0-9-]+)(&nbsp;&nbsp;、)*(\d*)</P>")

# Trip length in days ("行程天数" = itinerary days).
route_days = re.compile(r"行程天数：</TD><TDclass=hei14background=images/tablebg.gifheight=25>(\d+)天</TD>")

# Price in yuan ("元").
route_price = re.compile(r"height=25>(\d+)元</TD><TDalign=rightbackground=images/tablebg")

# Guide info is not published on this site; recorded empty.
route_guide = ""

# Departure dates ("发团时间" = departure time).
route_dates = re.compile(r"发团时间：</TD><TDclass=hei14background=images/tablebg\.gifheight=25>(.*?)</TD>")

# Full itinerary description cell.
route_detail = re.compile("<TRclass=TextbgColor=#ffffff><TD>(.*?)</TD>")

# City the tour departs from ("Kunming").
route_out_city = "昆明"

# Group-size info is not available; recorded empty.
route_person = ""

# Outbound / return transport are not available; recorded as "-".
route_go_t = "-"

route_back_t = "-"

# Highlighted sights appear inside <STRONG> tags.
route_sights = re.compile("<STRONG>(.*?)</STRONG>")

# --- per-day schedule fields, primary page template -------------------------
schedule_place = re.compile(
        r"<TDclass=STYLE1background=http://www\.52zql\.com/upfile/2009-1-10/2009011011010979252\.gif>(.*?)</TD>")

schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>'
        )

schedule_repast = re.compile('<TDalign=rightwidth="8%"bgColor=#f6f6f6>用餐：</TD><TDwidth="13%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_hotel = re.compile('住宿：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_brief = re.compile("<TRclass=Table-dot><TDclass=nrvAlign=top>(.*?)</TR>")

# --- fallback template: one <TR> per day, fields in positional <TD>s --------
each_schedule = re.compile('<TRstyle="HEIGHT:18.75pt.*?>(.*?)</TR>')

each_item = re.compile("<TD.*?>(.*?)</TD>")

# Strips any remaining markup from captured cell contents.
omit_all_html_tags = re.compile("<.*?>")

def get_all_route_list():
    html = urllib2.urlopen(entrance).read().decode("gb2312").encode("utf-8")
    plist = []
    for match in re.finditer(r"<a href=(route_.*?\.html)>.*?</a>", html):
        plist.append("http://www.zql.yn.cn/"+match.group(1))
    print plist
    route_list = []
    for pl in plist:
        html = urllib2.urlopen(pl).read().decode("gbk").encode("utf-8")
        for match in re.finditer(r"<a href='(route/info\d+\.html)' target='_blank'>.*?</a>", html):
            route_list.append("http://www.zql.yn.cn/"+match.group(1))
    output = open(rootpath + 'zql.data', 'w')
    output.write("@".join(route_list))
    return  route_list

def exec_crawl():
    for route in get_all_route_list():
        try:
            print "Begin To Crawl "+route
            crawl_single(route)
            print "Done!"+route
        except :
            print "Crawl Error" + route
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url):
    lines = urllib2.urlopen(url,None,120).read();
    html = "".join(lines.split()).decode("gb2312").encode("utf-8")
    #print html

    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is" + url
        return
    route = Route()
    route.site = "48"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url
    route.type = route_type
    route.title = m.group(1)
    route.rout_url = url
    route.outcity = route_out_city

    owns = route_payown.findall(html)
    ttmp = 0
    for own in owns:
        if(ttmp == 0):
            arr = own.split("<br>")
            for ar in arr:
                if ar.find('景点') > -1:
                    route.tickets = ar[33:]
                elif ar.find('导游') > -1:
                    route.guide = ar[33:]
                elif ar.find('住宿') > -1:
                    route.hotel = ar[33:]
                elif ar.find('用餐') > -1:
                    route.meal = ar[33:]
                elif ar.find('交通') > -1:
                    route.traffic01 = ar[33:]

        if(ttmp == 1):
            route.payown = own[16:]
        ttmp = ttmp +1

    m = route_dates.search(html)
    if(m):
        route.dates = m.group(1)
    m = route_days.search(html)
    if(m):
        route.days = m.group(1)
    m = route_detail.search(html)
    if(m):
        route.detail = m.group(1)
    m = route_sights.search(html)
    if(m):
        route.sights = ",".join(route_sights.findall(html)).replace('，',',').replace('，',',')
    m = route_telphone.search(html)
    if(m):
        route.telphone = m.group(1)
    m = route_price.search(html)
    if(m):
        route.price = m.group(1)

    print route.tostr()

    route.schedules=[]
    briefs = schedule_brief.findall(html)
    if(len(briefs) == 0):
        print "Switch To Using Another Template"
        sch_cnts = each_schedule.findall(html)
        for sch_cnt in sch_cnts:
            items = each_item.findall(sch_cnt)
            schedule = Schedule()
            tmp = 0;
            num = 1
            for item in items:
                
                oitem = omit_all_html_tags.sub("",item)
                if(tmp == 1):
                    schedule.s_traffic = oitem
                elif(tmp == 2):
                    schedule.s_brief = oitem
                elif(tmp == 3) :
                    if(oitem.find("早") > -1):
                        schedule.s_brf = "1"
                    elif(oitem.find("中") > -1):
                        schedule.s_lunch = "1"
                    elif(oitem.find("晚") > -1):
                        schedule.s_dinner = "1"
                elif(tmp == 4):
                    schedule.s_place = oitem
                schedule.s_num = num
                num += 1
                route.schedules.append(schedule)
                tmp = tmp + 1
        route.updRoute()
        route.updRoute_Schedules()
        return

    traffics = schedule_traffic.findall(html)

    citys = schedule_city.findall(html)

    hotels = schedule_hotel.findall(html)

    repasts = schedule_repast.findall(html)

    if(len(briefs) == len(traffics) == len(citys) == len(hotels) == len(repasts)):
        tmp = 0
        for br in briefs:
            schedule = Schedule()
            brr =  omit_all_html_tags.sub("", br)
            schedule.s_brief = brr
            schedule.s_place = citys[tmp]
            schedule.s_traffic = traffics[tmp]
            tmp_repasters = repasts[tmp]
            if(tmp_repasters.find("早") > -1):
                schedule.s_brf = "1"
            elif(tmp_repasters.find("中") > -1):
                schedule.s_lunch = "1"
            elif(tmp_repasters.find("晚") > -1):
                schedule.s_dinner = "1"
            route.schedules.append(schedule)
            tmp = tmp + 1
    route.updRoute()
    route.updRoute_Schedules()

def do_crawl():
    try:
       exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())


def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
    # Single-URL invocations kept for manual debugging of one detail page:
    #crawl_single("http://www.zql.yn.cn/route/info47600.html")
    #crawl_single("http://www.zql.yn.cn/route/info41200.html")
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()



