"""
Python module to interact indian railways website.
"""

import sys
import urllib
import string
import web
from BeautifulSoup import BeautifulSoup
import pprint
import time
import re

def error(*args):
    """Write the space-joined string forms of *args* to stderr, newline-terminated."""
    sys.stderr.write(" ".join(str(a) for a in args) + "\n")

def debug(*args):
    """Emit a debug message to stderr (same formatting as error())."""
    sys.stderr.write(" ".join(str(a) for a in args) + "\n")

def ticket_status(pnr):
    """Return reservation status of the ticket specified by the 10 digit PNR number.

    Returns a web.storage with pnr_number, journey_date, train (number/name/
    frm/to), chart_status and a passengers list, or the string "BAD RESULT"
    when the site returns an unparseable page.
    """
    # NOTE: the original placed this docstring after the first statement,
    # where it was a no-op string literal, and used a local variable that
    # shadowed the function's own name; both fixed.
    url = 'http://www.indianrail.gov.in/cgi_bin/inet_pnrstat_cgi.cgi'
    data = {
        'lccp_pnrno1': pnr[:3],
        'lccp_pnrno2': pnr[3:]
    }

    soup = get_soup(url, data)
    tables = soup.findAll('table')
    if not tables:
        return "BAD RESULT"

    # table #3 holds the train summary; row 1 is the data row.
    train_details = [row for row in parse_table(tables[2]) if len(row)]

    status = web.storage()
    status.pnr_number = pnr[:3] + "-" + pnr[3:]
    status.journey_date = train_details[1][3]
    status.train = web.storage(
        number=train_details[1][1],
        name=train_details[1][2].title(),
        frm=train_details[1][4],
        to=train_details[1][5],
    )

    # table #6: one row per passenger, final row is the chart status.
    rows = [row for row in parse_table(tables[5]) if len(row)]
    status.chart_status = rows.pop()
    status.passengers = [
        web.storage(passenger=r[0], booking_status=r[1], current_status=r[2])
        for r in rows
    ]
    return status

def all(seq):
    """Return True unless *seq* contains a falsy element.

    Shadows the builtin (kept for pre-2.5 compatibility).  The original
    tested ``a is False`` and so treated other falsy values (0, None, "")
    as true; generalized to truthiness to mirror the builtin's semantics.
    """
    for item in seq:
        if not item:
            return False
    return True

def gettext(e):
    """Concatenate the .text of element *e* and all of its descendants.

    Tail text is not included.  Uses Element.iter() (available since
    Python 2.7 / ElementTree 1.3) instead of the deprecated getiterator(),
    which was removed in Python 3.9.
    """
    return "".join(c.text for c in e.iter() if c.text is not None)

def parse1_table(root, attrs, index=0):
    """Return the rows of the index-th matching xhtml <table> under *root*.

    *attrs* maps attribute name -> expected value; a table matches when every
    pair agrees.  Each row is the list of its cells' concatenated text (via
    gettext).  Returns None when fewer than index+1 tables match.
    """
    XHTML = '{http://www.w3.org/1999/xhtml}'

    def match(t):
        return all(t.attrib.get(k) == v for k, v in attrs.items())

    tables = [t for t in root.findall('.//' + XHTML + 'table') if match(t)]
    if len(tables) > index:
        rows = tables[index].findall('.//' + XHTML + 'tr')
        # The original also computed an unused `r = rows[1]`, which would
        # raise IndexError on single-row tables; removed.
        # list(tr) replaces the deprecated tr.getchildren().
        return [[gettext(td) for td in list(tr)] for tr in rows]
    return None

def extract_text(soup):
    """Concatenate the stripped text nodes found anywhere under *soup*."""
    pieces = []
    for node in soup.recursiveChildGenerator():
        if isinstance(node, unicode):
            pieces.append(str(node).strip())
    return ''.join(pieces)

def parse_table(table):
    """Return the table's rows: one inner list of cell texts per <tr>."""
    result = []
    for tr in table.findAll('tr'):
        result.append([extract_text(td) for td in tr.findAll('td')])
    return result

def list_to_string(list, s=''):
    """Recursively flatten *list* (strings and nested lists of strings),
    appending each piece to *s*, and return the combined string.

    Note: the parameter name shadows the builtin ``list``; kept for
    backward compatibility, hence the isinstance check against type([]).
    """
    for text in list:
        if isinstance(text, type([])):
            s = list_to_string(text, s)
        else:
            s = s + text
    return s

def running_status(train, date):
	"""Return the running status of a train.
	>>> running_status("434SC", "30/05/2007")
	"""
	url = 'http://www.trainenquiry.com/RunningTT_Display.aspx'
	data = urllib.urlencode(dict(drop_sel=train, date=date))
	#result = urllib.urlopen(url + '?' + data)
	url = url+'?'+data
	soup = get_soup(url,{})
	
	#return parse_table(soup)
	#result = parse_table(soup)
	#soup = BeautifulSoup(list_to_string(result))
	print soup.prettify()

def train_schedule(train, date):
    """Return the schedule of the train as a list of rows (or None).

    XXX: What is the need of date here?

    NOTE(review): the original body called the undefined name
    TidyHTMLTreeBuilder (a guaranteed NameError) and passed a second
    argument to parse_table(), which takes only one.  Rewritten to use the
    get_soup/parse_table pattern used by the rest of this module; verify
    against a live response that 'Table_rows' is still the right class.
    """
    url = 'http://www.trainenquiry.com/Schedule_Display.aspx'
    query = urllib.urlencode(dict(drop_sel=train, date=date))
    soup = get_soup(url + '?' + query, {})
    tables = soup.findAll('table', attrs={'class': 'Table_rows'})
    if tables:
        return parse_table(tables[0])
    return None

_station_codes = None

def get_station_code_map():
    """Return a dictionary mapping from station code to station name.

    The result is cached in the module-level _station_codes after the first
    call.  One worker thread is spawned per initial letter a-z.

    NOTE(review): the original body referenced sanitize, TidyHTMLTreeBuilder,
    StringIO and threading, none of which are imported by this file, so it
    could never have run.  station_search is rewritten with the module's own
    get_soup/parse_table helpers (mirroring get_all_stations); threading is
    imported locally.
    """
    global _station_codes

    if _station_codes is not None:
        return _station_codes

    import threading  # local import: only needed while building the cache

    def station_search(first_letter):
        """Query stations whose name starts with first_letter -> {code: name}."""
        url = "http://www.indianrail.gov.in/cgi_bin/inet_stncode_cgi.cgi"
        data = dict(
            lccp_stnname=first_letter,
            lccp_SearchType="START_STR",
            lccp_choice="STN_NAME")

        soup = get_soup(url, data)
        tables = soup.findAll('table')
        # for correct responses, table #4 contains the (name, code) rows
        if len(tables) < 4:
            return {}
        codes = {}
        for row in parse_table(tables[3])[1:]:  # skip header row
            if len(row) == 2:
                name, code = row
                codes[code] = name
        return codes

    def call(letter):
        """Start a worker thread for one letter; its result lands on t.result."""
        def worker():
            t.result = station_search(letter)
        t = threading.Thread(target=worker, name='SearchThread-' + letter)
        t.start()
        return t

    def all_codes():
        # one thread per initial letter; merge the partial maps as they finish
        threads = [call(c) for c in string.lowercase]
        codes = {}
        for t in threads:
            t.join()
            codes.update(t.result)
        return codes

    _station_codes = all_codes()
    return _station_codes

def get_all_stations():
    """Fetch every station, one query per initial letter, merged into one dict."""
    def station_search(first_letter):
        url = "http://www.indianrail.gov.in/cgi_bin/inet_stncode_cgi.cgi"
        data = dict(
            lccp_stnname=first_letter,
            lccp_SearchType="START_STR",
            lccp_choice="STN_NAME"
        )
        soup = get_soup(url, data)
        tables = soup.findAll('table')

        # for correct responses, table#4 contains the train data
        if len(tables) == 4:
            return table2map(tables[3])
        return {}

    results = [station_search(letter) for letter in string.lowercase]
    return web.utils.dictadd(*results)

def train_between_stations(destination, source, date, klass):
    """Look up trains between two station codes.

    *date* is a (day, month) pair and *klass* a class code such as 'SL',
    '1A' or 'ZZ' (all classes).  Returns the parsed enquiry details, or an
    apology string when the site reports no matching trains.
    """
    url = "http://www.indianrail.gov.in/cgi_bin/inet_srcdest_cgi_date.cgi"

    data = dict(
        lccp_src_stncode=source,
        lccp_dstn_stncode=destination,
        lccp_classopt=klass,
        lccp_day=date[0],
        lccp_month=date[1],
    )
    soup = get_soup(url, data)
    try:
        return enquiry_details(soup)
    except IndexError:
        # the expected tables are absent when no trains match
        return "sorry there is no trains between this two stations" 

def table2map(table):
    """Map each data row's first cell to the full row; {} for an empty table.

    The first row is treated as a header and skipped.
    """
    rows = parse_table(table)
    if not rows:
        return {}
    return dict((row[0], row) for row in rows[1:])

def get_soup(url, data, predicate=None, max_trials=50):
    """POST *data* to *url* and return the response parsed with BeautifulSoup.

    The railway servers intermittently return garbage, so the request is
    retried up to *max_trials* times until predicate(soup) is true; with no
    predicate the first response is accepted.  Raises Exception after
    max_trials bad responses.
    """
    # Fixes vs original: dead `else: pass` branch removed, mixed tab
    # indentation normalized, legacy `raise Exception, msg` syntax replaced.
    for _ in range(max_trials):
        result = urllib.urlopen(url, urllib.urlencode(data)).read()
        soup = BeautifulSoup(result)
        if predicate is None or predicate(soup):
            return soup
    raise Exception("Bad response")

def map(train_search, zero99):
    """Apply *train_search* to each item of *zero99*, returning a list.

    NOTE: shadows the builtin map (sequential; was once a pmap stand-in).
    """
    results = []
    for num in zero99:
        results.append(train_search(num))
    return results

def get_all_trains():
    """Download the full train list by querying every two-digit prefix 00-99."""
    def train_search(text):
        url = "http://www.indianrail.gov.in/cgi_bin/inet_trnnum_cgi.cgi"
        data = dict(lccp_trnname=text)

        # sometimes indian railways server gives bad response.
        # Good responses can be identified by the presence of string "Train Names with Details".
        predicate = lambda soup: "Train Names with Details" in extract_text(soup)
        soup = get_soup(url, data, predicate)
        tables = soup.findAll('table')

        # for correct responses, table#4 contains the train data
        if len(tables) == 5:
            return table2map(tables[4])
        return {}

    prefixes = [a + b for a in string.digits for b in string.digits]
    results = [train_search(p) for p in prefixes]
    return web.utils.dictadd(*results)

def get_train_info(train_no):
    """Fetch the route of *train_no* from indianrail.gov.in.

    Returns a (train_details, station_rows) pair: train_details is the
    summary row for the train, station_rows the per-station rows ordered by
    serial number.  Returns the string "BAD RESULT" on any failure (the
    server misbehaves often, so this stays best-effort).
    """
    url = "http://www.indianrail.gov.in/cgi_bin/inet_trnpath_cgi.cgi"
    year, month, day = time.localtime()[:3]
    data = {
        "lccp_trn_no": train_no,
        "lccp_frday": day,
        "lccp_frmonth": month,
        "lccp_day": day,
        "lccp_month": month,
        "lccp_daycnt": 0,
    }
    try:
        soup = get_soup(url, data)
        tables = soup.findAll('table')
        # table #3 holds a single summary row; table #4 maps serial -> station row
        train_details = table2map(tables[2]).values()[0]
        stations = table2map(tables[3])
        ordered = [stations[str(k)] for k in sorted(map(int, stations.keys()))]
        return (train_details, ordered)
    except Exception as e:
        # was a bare `except` that printed the string "except"; report the
        # actual failure on stderr but keep the best-effort return value
        error("get_train_info failed for", train_no, "-", e)
        return "BAD RESULT"

def enquiry_details(soup):
    """Scrape the enquiry form out of a train-between-stations result page.

    *soup* is the BeautifulSoup tree of the result page.  Returns a
    web.storage with: trains (list of per-train storages), travelling_details
    (day/month/quota/age) and klass (the selected class option).
    Raises IndexError when the expected hidden inputs are absent.
    """
    # NOTE(review): this dict appears to be unused sample POST data left over
    # from debugging -- confirm before deleting.
    data =  { "lccp_classopt" : "ZZ",
		"lccp_class1" : "ZZ",
		"lccp_class2" : "ZZ",
		"lccp_class3" : "ZZ",
		"lccp_class4" : "ZZ",
		"lccp_class5" : "ZZ",
		"lccp_class6" : "ZZ",
		"lccp_class7" : "ZZ",
		"lccp_age"    : "ADULT_AGE",
		"lccp_day"    : "30",
		"lccp_month"  : "5",
		"lccp_quota"  : "GN",
		"lccp_trndtl" : "1014 BNC ATP 0YYYYYYY"
	    }
#    data = open('/room/subhan/Desktop/inet_srcdest_cgi_date.cgi1.html').read()
#    soup = BeautifulSoup(data)

    # each hidden lccp_trndtl input corresponds to one train row; the final
    # one is not a train (its sibling carries the month value, used below)
    trains  = soup.findAll(attrs = {'name' : re.compile('lccp_trndtl')})
    n =len(trains)-1
    
    other_details = web.storage(day = str(soup.find(attrs={'name':re.compile('lccp_day')})['value'])) 
    other_details.update(month = str(trains[n].next['value'])) 
    other_details.update(quota = "GN") #quota
    other_details.update(age = "ADULT_AGE")  #Age
    class_opt = str(soup.find(attrs = {'name' : re.compile('lccp_classopt')})['value'])


    list_of_trains = []
    for train in trains[:n]:
	# cells 1..5 of the enclosing row: name, origin, departure, destination, arrival
	d = web.storage(train_details = str(train['value']))
	d.update(number = str(train['value']).split(" ")[0])
	Train_details = train.parent.parent.findAll("td")[1:6]

	# some cells wrap their text in an <a>; fall back to the anchor's text
	train_name  = str(Train_details[0].string)
	if train_name == 'None':
	    train_name  = str(Train_details[0].a.string)
	d.update(name=train_name)	    

	source = str(Train_details[1].string)
	if source == 'None': 
	    source = str(Train_details[1].a.string)
	d.update(origin=source)

	dest = str(Train_details[3].string)
	if not dest:
	    dest = str(Train_details[3].a.string)
	d.update(destination=dest)
	    
	d.update(departure = str(Train_details[2].string))
	d.update(arrival = str(Train_details[4].string))
	# the per-train class availability inputs (lccp_classopt/class1..7)
	klass = train.parent.parent.findAll(attrs={'name':re.compile('lccp_class[o1-7]')})
	d.update(klasses = [str(k['value']) for k in klass])
	list_of_trains.append(d)
	
    return  web.storage(trains=list_of_trains,travelling_details=other_details,klass = class_opt)
    
def print_trans():
    trains = get_all_trains()
    keys = trains.keys()
    keys.sort()
    print "\n".join([",".join(trains[k]) for k in keys])

def print_stations():
    stations = get_all_stations()
    keys = stations.keys()
    keys.sort()
    print "\n".join([",".join(stations[k]) for k in keys])

def print_train_schdule():
		#trains = [row.split(',', 1)[0] for row in open('trains.csv').read().split('\n')]
		#schedules = pmap.pmap(get_train_info, trains, nthreads=10)
		trains = "8463"
		train_details, stations_details = get_train_info(trains)
		for station in stations_details:
			print station

if __name__ == "__main__":
#	running_status("6529", "26/03/2008")
#	print train_between_stations("BNC","ATP",['19','05'],"SL")
	print enquiry_details("url", "data")
