#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import urllib2
from Route import *

curpath = sys.path[0] + os.sep
rootpath = curpath[:curpath.find("resources")] + os.sep

entrance = ""

route_dest = ""

route_payown = re.compile("<strong>价格不含：</strong><p>(.*?)</p>")

route_has = re.compile("<strong>价格包含：</strong><p>(.*?)</p>")

route_supplier_url = "http://www.jjtravel.com/"

route_supplier = "锦江旅游"

route_title = re.compile("<dt>线路名称：(.*?)</dt>")

#route_title_bak = re.compile("<strong>(.*?)</strong></TD>")

#route_type_re =   re.compile("网站首页</A>>><A.*?>.+</A>>>(.+?)</div>")

route_telphone = re.compile(r"<h5>(\S+?)</h5>")

#route_days = re.compile(r"行程天数：(\d+)天")

#route_price = re.compile(r'标准团报价：(\d+)\.00')

route_date_prices = re.compile(r'<tdalign="center">(\d{4}-\d{2}-\d{2})</td><tdclass="priceList"align="center">(\d+)元</td>')

route_guide = ""

#route_dates = re.compile("出团日期：(.*?)</TD>")

route_detail = re.compile('<DIV.*?>线路特色：</DIV><DIVstyle="PADDING-LEFT:6px"class="txt2">(.*?)</DIV>')

#route_out_city = re.compile("<TD>出发城市：(.*?)</TD>")

route_image_url = re.compile(r"img1\.src='(.*?)'")

route_person = ""

route_go_t = "-"

route_back_t = "-"

route_traffic = re.compile("<b>旅游交通：</b>(.*?)<P>")

route_sights = re.compile("<STRONG>(.*?)</STRONG>")

#route_plane = re.compile(r'document\.getElementById\("frame1"\)\.location\("(.*?)"\)')

#route_plane_dates = re.compile(r"</td><td class='dot_line txt3' width='12%'>(\d+)月(\d+)日</td><td class")

#route_plane_prices = re.compile(r"￥(\d+).00")

#route_plane_left = re.compile("width='10%'><b>(.*?)</td>")

schedule_place = re.compile("<p>(.*?)<br/>")

#schedule_traffic = re.compile('<TDalign=rightwidth="9%"bgColor=#f6f6f6>交通：</TD><TDwidth="15%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_repast_hotel = re.compile(".*<br/>(.*?)</p>")

schedule_hotel = re.compile('<SPANclass=scene_instyle="PADDING-TOP:3px">(.*?)</SPAN>')

#schedule_city = re.compile('抵达城市：</TD><TDwidth="18%"bgColor=#f6f6f6>(.*?)</TD>')

schedule_brief = re.compile("<br/>(.*?)<")

each_schedule = re.compile(r"<strong>第\d+天.*?</strong><p>(.*?)</p>")

#each_item = re.compile("<TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD><TD.*?>(.*?)</TD>")

omit_all_html_tags = re.compile("<.*?>")

def get_source_from_gb(url):
    html = ""
    try:
        html = urllib2.urlopen(url,None,120).read().decode("gb2312").encode("utf-8")
    except UnicodeDecodeError:
        html = urllib2.urlopen(url,None,120).read().decode("gbk").encode("utf-8")
    print "debug--"+url
    return html

def get_all_route_list():
    entras = {"国内旅游":"http://www.jjtravel.com/searchList.do?=&=&coptype=%B9%FA%C4%DA&order=&cteamcode=&clinename=&cnation=&idays=&dbgndate=&denddate=&carea=&priceMin=&priceMax=&pageIndex=#tp",
            "出境旅游":"http://www.jjtravel.com/searchList.do?=&=&coptype=%B3%F6%BE%B3&order=&cteamcode=&clinename=&cnation=&idays=&dbgndate=&denddate=&carea=&priceMin=&priceMax=&pageIndex=#tp"}
    ret = {}
    for k,v in entras.iteritems():
        html = get_source_from_gb(v.replace('#tp',str(1)))
        match = re.search(r"共 <strong>(\d+)</strong> 页</a>", html)
        tp = 1
        if(match):
            tp = int(match.group(1))
        print "tp" , tp

        for pg in range(1,tp):
            entra = v.replace('#tp',str(pg))
            print entra
            html = get_source_from_gb(entra)
            for match in re.compile('<a href="(.*?)" target="_blank" style="width:580px;">.*?</a>').finditer(html):
                print "http://www.jjtravel.com"+ match.group(1)
                ret["http://www.jjtravel.com"+ match.group(1)] = k
    return  ret

def exec_crawl():
    for k,v in get_all_route_list().iteritems():
        try:
            print "Begin To Crawl "+k +" Type " + v
            crawl_single(k,v)
            print "Done!"+k
        except :
            print "Crawl Error" + k
            print traceback.format_exc(sys.exc_info())
            continue

def crawl_single(url,route_type):
    html = get_source_from_gb(url)
    html = "".join(html.split())
    print html
    route = Route()
    tdatee = []
    min = 999999999
    for match in route_date_prices.finditer(html):
        tdatee.append(match.group(1))
        if(min > int(match.group(2))):
            min = int(match.group(2))
    if(min == 999999999) :
        min = 0
    if(min == 999999999 | min == 0) :
        print "Can not Get Price Info.Begin to Return.URL is "+ url
        return
    route.dates = ",".join(tdatee)
    route.price = str(min)


    m = route_title.search(html)
    if(m):
        print  m.group(1)
    else:
        print  "Crawl Error!Chief Culprit Is " + url
        return

    route.site = "39"
    route.dest = route_dest
    route.supplier = route_supplier
    route.supplier_url = route_supplier_url

    route.title = m.group(1)
    route.rout_url = url
    route.outcity = "上海"

    route.type = route_type

    m = route_payown.search(html)
    if(m):
        route.payown = m.group(1)

    m = route_telphone.search(html)
    if(m):
        route.telphone = m.group(1)

    m = route_has.search(html)
    if(m):
        ttt =  m.group(1)
        route.meal = ttt

    m = route_detail.search(html)
    if(m):
        route.detail = m.group(1)


    route.schedules=[]

    sch_cnts = each_schedule.findall(html)
    route.days = str(len(sch_cnts))
    cnum = 0
    for sch_cnt in sch_cnts:
            print sch_cnt
            cnum = cnum + 1
            schedule = Schedule()
            schedule.s_num = str(cnum)
            schedule.s_brief = "".join(schedule_brief.findall(sch_cnt))
            schedule.s_hotel = "".join(schedule_hotel.findall(sch_cnt))
            schedule.s_place = "".join(schedule_place.findall(sch_cnt))
            oitem = "".join(schedule_repast_hotel.findall(sch_cnt))
            #print oitem
            if(oitem.find("早餐:含") > -1):
                schedule.s_brf = "1"
            if(oitem.find("午餐:含") > -1):
                schedule.s_lunch = "1"
            if(oitem.find("晚餐:含") > -1):
                schedule.s_dinner = "1"
            if(oitem.find("住宿:") > -1):
                schedule.s_hotel = oitem[oitem.find("住宿:")+7:]
            if(schedule.s_brief == ""):
                schedule.s_brief = sch_cnt
            route.schedules.append(schedule)

    route.updRoute()
    route.updRoute_Schedules()

def do_crawl():
    try:
        exec_crawl()
    except:
        print traceback.format_exc(sys.exc_info())

def usage():
    """Print command-line usage for this crawler to stdout."""
    help_text = '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]
    # The original 'print' statement appended a newline after the text.
    sys.stdout.write(help_text + "\n")

# Script entry point: crawl everything. The commented-out calls are kept
# as manual-debugging alternatives (single listing / single route).
if __name__ == '__main__':
#    get_all_route_list()
#    crawl_single("http://www.jjtravel.com/tteamdetail.do?uid=6E609916-04AE-4065-ADD8-B0C90C5F40F7","境外旅游")
    do_crawl()
#	script_path = os.path.dirname(os.path.realpath(__file__))
#	os.chdir(script_path)
#	do_debug = False
#
#	try:
#		opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
#	except getopt.GetoptError:
#		usage()
#		sys.exit()
#
#	for opt, arg in opts:
#		if opt in ('-h', '--help'):
#			usage()
#			sys.exit()
#		elif opt in ('-d', '--debug'):
#			do_debug = True
#		elif opt in ('-n', '--no-cache'):
#			use_cache = False
#		elif opt in ('-o', '--only-cache'):
#			if arg.lower() in ('no', 'n', '0'):
#				only_cache = False
#			else:
#				only_cache = True
#		elif opt in ('-p', '--provider'):
#			pass
#
#	if do_debug:
#		import pdb
#		pdb.set_trace()
#	do_crawl()



