#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

# Directory containing this script, and the project root (everything above
# the "resources" directory).
curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

# Entry page listing the selectable departure cities.
entrance = "http://www.aoyou.com/DomesticPackage/UC/GDGrpCity.aspx?T=5&sFldtxt=T_DepCity&sFldid=T_DepCityID"

# Destination label; never extracted from the page, stored empty.
route_dest = ""

# NOTE: all patterns below match HTML that has had EVERY whitespace character
# stripped first (see crawl_single), which is why attribute pairs appear fused
# like 'divclass="..."'.  Do not re-insert spaces.

# "Fees NOT included" list items.
route_payown = re.compile('费用不包含</strong>：</div><divclass="xq_zytishi"><ul><li>(.*?)</li></ul></div>')

# "Fees included" list items.
route_has = re.compile('费用包含</strong>：</div><divclass="xq_zytishi"><ul><li>(.*?)</li></ul></div>')

# Supplier home page and display name (Aoyou / CYTS).
route_supplier_url = "http://www.aoyou.com/"

route_supplier = "中青旅遨游"

# Page title, minus the site-name suffix.
route_title = re.compile("<title>(.*?)_中青旅遨游网</title>")

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

#route_type_re =   re.compile("网站首页</A>>><A.*?>.+</A>>>(.+?)</div>")

# Contact telephone number shown in an <h5> element.
route_telphone = re.compile(r"<h5>(\S+?)</h5>")

# Trip length in days ("行程天数" = trip duration).
route_days = re.compile(r"行程天数：<span><b>(\d+)天</b>")

# Listed price (digits only).
route_price = re.compile(r'<divclass="jbxxjiage4">(\d+)</div>')

#route_date_prices = re.compile(r'<tdalign="center">(\d{4}-\d{2}-\d{2})</td><tdclass="priceList"align="center">(\d+)元</td>')

# Guide info; not scraped, stored empty.
route_guide = ""

# Departure dates ("出发班期" = departure schedule).
route_dates = re.compile("出发班期：<span><b>(.*?)</b>")

# Highlights/description block ("亮点特色" = highlights).
route_detail = re.compile('<divclass="xq_tit">亮点特色：</div><divclass="xq_nr2">(.*?)</div>')

# Departure city ("出发城市").
route_out_city = re.compile('<divclass="jbxx3">出发城市：<span><b>(.*?)</b></span>')

# Main product image URL.
route_image_url = re.compile('<divclass="jbxximg"><imgsrc="(.*?)".*?></div>')

route_person = ""

# Outbound/return time placeholders; not available on this site.
route_go_t = "-"

route_back_t = "-"

# Transportation description ("旅游交通" = travel transport).
route_traffic = re.compile("<b>旅游交通：</b>(.*?)<P>")

# Sight names linked from the itinerary service response.
route_sights = re.compile('<span><ahref=".*?"target="_blank">(.*?)</a></span>')

#route_plane = re.compile(r'document\.getElementById\("frame1"\)\.location\("(.*?)"\)')

#route_plane_dates = re.compile(r"</td><td class='dot_line txt3' width='12%'>(\d+)月(\d+)日</td><td class")

#route_plane_prices = re.compile(r"￥(\d+).00")

#route_plane_left = re.compile("width='10%'><b>(.*?)</td>")

# Per-day schedule patterns (currently only used by the commented-out
# schedule-parsing code in crawl_single).
schedule_place = re.compile("<p>(.*?)<br/>")

#schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_repast_hotel = re.compile(".*<br/>(.*?)</p>")

schedule_hotel = re.compile('<SPANclass=scene_instyle="PADDING-TOP:3px">(.*?)</SPAN>')

#schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_brief = re.compile("<br/>(.*?)<")

each_schedule = re.compile(r"D\d+</FONT></B></DIV></TD><TDwidth=455><DIV><FONTsize=2>(.*?)</FONT></DIV></TD><TDwidth=91><DIValign=center><FONTsize=2>(.*?)</FONT></DIV></TD><TDwidth=98><DIValign=center><FONTsize=2>(.*?)</FONT></DIV></TD></TR>")

#each_item = re.compile("<TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD>")

# Strips any HTML tag; used to clean the highlights text.
omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_source_by_post(url,data):
    html = ""
    try:
        html = urllib2.urlopen(url,data,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,data,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    ret = {}
    html = get_source_from_gb(entrance)
    #print html
    citys = re.compile(r"SetCity\('(\d+)\|.*?'\)").findall(html)
    home = "http://www.aoyou.com/DomesticPackage/ProductSearch.aspx?InternalFlag=1&ArrCityID=&DepCityID="
    inter =  "http://www.aoyou.com/DomesticPackage/ProductSearch.aspx?InternalFlag=2&ArrCityID=&DepCityID="
    for city in citys:
       html = get_source_from_gb(home + city)
       html = "".join(html.split())
       for match in re.compile(r"showProductInfo\((\d+),(\d+),(\d+)\)").finditer(html):
            turl = "http://www.aoyou.com/DomesticPackage/p%si%sa%s"  % (match.group(1),match.group(2),match.group(3))
            ret[turl] = "国内自由行"
            print turl
       html = get_source_from_gb(inter + city)
       html = "".join(html.split())
       for match in re.compile(r"showProductInfo\((\d+),(\d+),(\d+)\)").finditer(html):
            turl = "http://www.aoyou.com/DomesticPackage/p%si%sa%s"  % (match.group(1),match.group(2),match.group(3))
            ret[turl] = "出境自由行"
            print turl
    return  ret

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k +" Type " + v
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url,route_type):
    html = get_source_from_gb(url)
    html = "".join(html.split())
    #print html
    route = Route()

    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is " + url
        return

    route.site = "30"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url

    route.title = m.group(1)
    route.rout_url = url
    route.outcity = "北京"

    route.type = route_type

    nnn = re.search(r"p(\d+)i", url)
    if(nnn):
        tht = get_source_from_gb("http://www.aoyou.com/DomesticPackage/ProductDetailService.ashx?OprType=4&ProductID="+nnn.group(1))
        tht = "".join(tht.split())
        #print tht
        m = route_payown.search(html)
        if(m):
           route.payown = re.compile("</li><li>").sub(',',m.group(1))

        m = route_has.search(html)
        if(m):
          ttt =  m.group(1)
          route.meal = re.compile("</li><li>").sub(',',m.group(1))
        tht = get_source_from_gb("http://www.aoyou.com/DomesticPackage/ProductDetailService.ashx?OprType=2&ProductID="+nnn.group(1))
        tht = "".join(tht.split())
        #print tht
        route.sights = ",".join(route_sights.findall(tht))

        

    m = route_image_url.search(html)
    if(m):
        route.img_url = m.group(1)

    m = route_out_city.search(html)
    if(m):
        route.outcity = m.group(1)

    m = route_dates.search(html)
    if(m):
        route.dates = m.group(1)
    m = route_days.search(html)
    if(m):
        route.days = m.group(1)
    m = route_detail.search(html)
    if(m):
        route.detail = omit_all_html_tags.sub("",m.group(1))

    m = route_telphone.search(html)
    if(m):
        route.telphone = m.group(1)
    m = route_price.search(html)
    if(m):
        route.price = m.group(1)
    else:
        print "None Price.Passed!"
        return

    #route.schedules=[]

#    sch_cnts = each_schedule.findall(html)
#    route.days = str(len(sch_cnts))
#    cnum = 0
#    for sch_cnt in sch_cnts:
#            print sch_cnt
#            cnum = cnum + 1
#            schedule = Schedule()
#            schedule.s_num = str(cnum)
#            schedule.s_brief = "".join(schedule_brief.findall(sch_cnt))
#            schedule.s_hotel = "".join(schedule_hotel.findall(sch_cnt))
#            schedule.s_place = "".join(schedule_place.findall(sch_cnt))
#            oitem = "".join(schedule_repast_hotel.findall(sch_cnt))
#            #print oitem
#            if(oitem.find("早餐:含") > -1):
#                schedule.s_brf = "1"
#            if(oitem.find("午餐:含") > -1):
#                schedule.s_lunch = "1"
#            if(oitem.find("晚餐:含") > -1):
#                schedule.s_dinner = "1"
#            if(oitem.find("住宿:") > -1):
#                schedule.s_hotel = oitem[oitem.find("住宿:")+7:]
#            if(schedule.s_brief == ""):
#                schedule.s_brief = sch_cnt
#            print schedule.tostr()
#            route.schedules.append(schedule)

    route.updRoute()
    #route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]

if __name__ == '__main__':
    # Script entry point: run the full crawl.  Everything commented out
    # below is leftover debugging / getopt CLI scaffolding kept for reference.
#    get_all_route_list()
#    crawl_single("http://www.aoyou.com/DomesticPackage/p2542i1a1","境外旅游")
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()


