#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# Path of this script's directory and of the project root (the part of the
# path above the "resources" directory), both with a trailing separator.
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# Landing page whose category links seed the whole crawl (see get_all_route_list).
entrance = "http://www.imytour.com/product/content.php?id=318"

# Destination is not extracted for this site; stored empty on every Route.
route_dest = ""

# NOTE: most patterns below are matched against page source that has had ALL
# whitespace stripped first (see crawl_single), hence forms like "<TDheight=10>".
# "Pay own" (self-funded items) section of the detail page.
route_payown = re.compile("<TR><TDheight=10></TD></TR><TR><TDheight=10>([^<].*?)</TD></TR><TR><TDheight=10></TD></TR>")

route_supplier_url = "http://www.imytour.com/"

# Route title inside the page-header div.
route_title = re.compile('<divclass="neirongbg_left_title">(.*?)</div>')

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

#route_type =   "普通线路"

# Supplier display name (kept in the site's original Chinese; it is data,
# not a comment).
route_supplier = "迈途旅游网"

#route_telphone = re.compile("<FONTcolor=#f79709size=3>电&nbsp;&nbsp;&nbsp;话：(.*?)</FONT>")

#route_days = re.compile(r"<TD>天数：(\d+)</TD>")

#route_price = re.compile(r'<TDwidth="39%">网上报价\(大人\)：<SPANclass=price>(\d+)</SPAN></TD>')

#route_guide = ""

#route_dates = re.compile("出发日期：(.*?)</P><Palign=center>")

# Repeated <div class="xh"> blocks: itinerary detail / meals / pay-own
# sections, consumed positionally in crawl_single.
route_details = re.compile('<divclass="xh">(.*?)</div>')

route_price_out_dates = re.compile(r'<inputtype="hidden"id="ProductPrice"name="ProductPrice"value="(\d+)"><ul><li>(.*?)</li><li>(.*?)</li>')

# Per-departure rows: group(1)=departure city, group(2)=YYYY-MM-DD date,
# group(3)=price in yuan (after the fullwidth ￥ sign).
route_out_date_prices = re.compile(r'<inputtype="hidden".*?><ul><li>(.*?)</li><li>(\d{4}-\d{2}-\d{2})</li><li.*?>¥(.*?)</li><li>')

#route_out_city = re.compile("<TD>出发城市：(.*?)</TD>")

# Cover image: extracted from the flash slideshow's "image" query parameter.
route_image_url = re.compile(r'embedsrc="http://www.imytour.com/media/flash/show.swf\?image=(.*?)"width="183"height="131"')

route_person = ""

# Departure / return transport are unknown for this site; stored as "-".
route_go_t = "-"

route_back_t = "-"

#route_sights = re.compile("<STRONG>(.*?)</STRONG>")

#schedule_place = re.compile(r"<TDclass=STYLE1background=http://www\.52zql\.com/upfile/2009-1-10/2009011011010979252\.gif>(.*?)</TD>")

#schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_repast = re.compile('<TDalign=rightwidth="8%"bgColor=#f6f6f6>用餐：</TD><TDwidth="13%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_hotel = re.compile('住宿：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

#schedule_brief = re.compile("<TRclass=Table-dot><TDclass=nrvAlign=top>(.*?)</TR>")

# One per-day itinerary container div; its contents feed each_item below.
each_schedule = re.compile(r'<divclass="xxnr_content"id="pidinfo_cont_\d+"style.*?><divclass="xxnr_content_tielt">(.*?)</div></div>')

# Per-day fields: group(1)=route/traffic line ("Day N ..."), group(2)=hotel,
# group(3)=meals text, group(4)=day description.
each_item = re.compile(r'第\d+天(.*?)</td>.*酒店:<fontcolor="#ff0000;">(.*?)</font>.*用餐:<fontcolor="#ff0000;">(.*?)</font>.*<divclass="wen">(.*?)</div>')

# Strips any remaining HTML tags from extracted fragments.
omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    html = urllib2.urlopen(entrance,None,120).read()
    plist = {}
    ret = {}
    retype = re.compile('<div\sclass="mdd\sc">([\s\S]+?)<ul\sclass="mdd_title">([\s\S]+?)</div>')
    for type,match_group in retype.findall(html):
        #if type.find('签'):
        #    continue
        if type.find('团队游') != -1:
            type = '团队游'
        else:
            type = type.strip()
        reobj = re.compile('<LI><a href="(.*?)" title="(.*?游)" >[\s\S]+?</A></LI>')
        for match in reobj.finditer(match_group):
            plist[match.group(1)] = type
        for k,v in plist.iteritems():
            print k,v
            html = urllib2.urlopen(k,None,120).read()
            for match in re.finditer(r'<dt><a href="(.*?)"><img src=".*?" width="140px" height="94px"></a></dt>', html):
                print match.group(1)
                ret[match.group(1)] = v
    return  ret

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue
def schedule_bak(html):
    """Backup schedule parser stub; always yields an empty schedule list."""
    return []
def crawl_single(url,route_type):
    lines = urllib2.urlopen(url,None,120).read()
    html = "".join(lines.split())

    print html

    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is " + url
        return
    route = Route()
    route.site = "52"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url
    route.type = route_type
    route.title = m.group(1)
    m = route_image_url.search(html)
    if(m):
        route.img_url = m.group(1)
    route.rout_url = url
    datess = []
    min = 9999999
    for match in route_out_date_prices.finditer(html):
        datess.append(match.group(2))
        if(int(match.group(3)) < min):
            min = int(match.group(3))
        route.outcity = match.group(1)
    if(min == 9999999):
        print "Price Node.Passed."+url
    route.price = str(min)
    route.dates = ",".join(datess)
    tmpc = 0
    for match in route_details.finditer(html):
        mmm = omit_all_html_tags.sub("",match.group(1))
        if(tmpc == 0):
            route.detail = mmm
        elif(tmpc == 3):
            route.payown = mmm
        elif(tmpc == 2):
            route.meal = mmm
        tmpc = tmpc + 1

    print route.tostr()

    route.schedules=[]
  
    sch_cnts = each_schedule.findall(html)
    
    for sch_cnt in sch_cnts:
        num = 1
        for match in each_item.finditer(sch_cnt):
            schedule = Schedule()
            tmp = 0
            schedule.s_traffic = match.group(1)
            schedule.s_brief = match.group(4)
            schedule.s_hotel = match.group(2)
            oitem = match.group(3)
            if(oitem.find("早") > -1):
                    schedule.s_brf = "1"
            elif(oitem.find("午") > -1):
                    schedule.s_lunch = "1"
            elif(oitem.find("晚") > -1):
                    schedule.s_dinner = "1"
            schedule.s_num = num
            num += 1
            route.schedules.append(schedule)
    route.updRoute()
    route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
    # Earlier single-URL debugging entry points, kept for reference:
    #get_all_route_list()
#get_all_route_list_by_type("http://www.gdcyts.com/travel/route_type.asp?route_type=1&pageno=1&page_record_num=1000")
#    crawl_single("http://www.gdcyts.com/travel/route_detail.asp?route_code=FEIJI&member_id=gdql&adv_id=","省内游")
#    crawl_single("http://www.imytour.com/product/content.php?id=346","东南亚旅游")
    # Run the full crawl (option parsing below is disabled).
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()



