#!/usr/bin/python

import urllib2
from xml.dom import minidom
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
import feedparser
import os

#d = feedparser.parse('./Ikea.rss')
##d = feedparser.parse('http://boston.craigslist.org/search/gbs/sss?hasPic=1&query=ikea&s=0&format=rss')
##print d['feed']['title']
#
##for m in d.keys():
##	print m
#
##print d['entries']
#
#for m in d['entries']:
#	print m







# Gmail account used as the SMTP sender in mail() below.
gmail_user = "wulicious.service@gmail.com"
# NOTE(review): password is blank and hard-coded — supply it at runtime
# (env var / config) instead of committing a credential to the source.
gmail_pwd = ""

def mail(to, subject, text, attachList):
	"""Send *text* to address *to* via Gmail SMTP, attaching files.

	attachList is an iterable of local file paths; each is attached as a
	generic octet-stream using its basename as the attachment filename.
	Uses the module-level gmail_user / gmail_pwd credentials.
	"""
	msg = MIMEMultipart()

	msg['From'] = gmail_user
	msg['To'] = to
	msg['Subject'] = subject

	msg.attach(MIMEText(text))

	for each in attachList:
		part = MIMEBase('application', 'octet-stream')
		# Read inside a with-block so the handle is always closed
		# (the original open(...).read() leaked the file object).
		with open(each, 'rb') as fh:
			part.set_payload(fh.read())
		Encoders.encode_base64(part)
		part.add_header('Content-Disposition',
				  'attachment; filename="%s"' % os.path.basename(each))
		msg.attach(part)

	mailServer = smtplib.SMTP("smtp.gmail.com", 587)
	try:
		mailServer.ehlo()
		mailServer.starttls()
		mailServer.ehlo()  # re-identify over the now-encrypted channel
		mailServer.login(gmail_user, gmail_pwd)
		mailServer.sendmail(gmail_user, to, msg.as_string())
	finally:
		# Should be mailServer.quit(), but that reportedly crashes against
		# this server; close() at least releases the socket, and the
		# try/finally guarantees it even if login/sendmail raises.
		mailServer.close()


def read_rss_feed(url, fileName):
	"""Download the RSS feed at *url* and save its raw bytes to *fileName*."""
	print('Reading the rss feed')
	response = urllib2.urlopen(url)
	try:
		body = response.read()
	finally:
		# urllib2 responses hold a socket open; the original never
		# released it.
		response.close()
	with open(fileName, 'w') as fout:
		fout.write(body)


def fetch_page(url):
	"""Fetch *url* and return the first line containing 'imgList = ['.

	Craigslist listing pages embed their photo URLs in a JS array literal
	named imgList; return that raw line, or None if the page has none.
	"""
	print('fetching url :' + url)
	response = urllib2.urlopen(url)
	try:
		# Scan line by line instead of readlines(): no need to hold the
		# whole page in memory just to find one line.
		for line in response:
			if 'imgList = [' in line:
				return line
		return None
	finally:
		# Release the underlying socket (the original leaked it).
		response.close()

def add_to_list(file_name, url):
	"""Return a JS function (as a string) that renders the feed in *file_name*.

	The generated function is named after the file (minus '.rss'); when
	called in the browser it fills the #content div with each entry's title
	followed by 100x100 thumbnail links scraped from the entry's page.
	*url* is accepted for interface compatibility but unused — the feed is
	read from the local file previously written by read_rss_feed().
	"""
	print('adding to list :' + file_name)
	if not os.path.exists(file_name):
		# Deliberately keep going: feedparser yields zero entries for a
		# missing file, so we still emit an (empty) JS function with the
		# expected name and the page's link to it stays valid.
		print 'Error : ' + file_name + ' does not exist'
	d = feedparser.parse(file_name)

	func_name = file_name.replace('.rss', '')
	html = '\tfunction ' + func_name + '(){\n'
	html += '\tval = "<br>"\n'

	for entry in d['entries']:
		# Strip quote characters so the title can sit inside a JS string
		# literal without terminating it early.
		title = entry.title.replace('"', ' ').replace("'", ' ')
		html += '\tval += "' + title + '<br>"\n'

		img_line = fetch_page(entry['link'])
		if img_line is None:
			continue
		# The scraped line looks like: imgList = ["url1","url2",...];
		img_line = img_line.replace('imgList = ["', '').replace('"];', '')

		for img in img_line.split(','):
			html += '\tval += \'<a href="' + entry['link'] + '">\'\n'
			img = img.replace('"', '').strip()
			html += '\tval += \'<img width=100 height=100 src="' + img + '">\'\n'
			html += '\tval += \'</a>\'\n'

		html += 'val += \'<br><br>\'\n'

	html += '\tdocument.getElementById("content").innerHTML = val\n'
	html += '\t}\n'

	return html
#
#	return html




# Map of local cache filename -> craigslist RSS search URL.  The filename
# (minus '.rss') also becomes the JS function / link name in listing.html.
view_list = {
	'rugs.rss': 'http://boston.craigslist.org/search/sss?hasPic=1&maxAsk=60&query=rug&s=0&format=rss',
	'Ikea.rss': 'http://boston.craigslist.org/search/gbs/sss?hasPic=1&query=ikea&s=0&format=rss',
	'bjursta.rss': 'http://boston.craigslist.org/search/gbs/sso?hasPic=1&query=bjursta&s=0&format=rss',
	'shoeshelf.rss': 'http://boston.craigslist.org/search/gbs/sss?hasPic=1&maxAsk=50&query=shoe%20shelf&s=0&format=rss',
	'dinningRoom.rss': 'http://boston.craigslist.org/search/gbs/sss?hasPic=1&maxAsk=200&query=dinning%20room%20table&s=0&format=rss',
	'highTopTable.rss': 'http://boston.craigslist.org/search/sso?hasPic=1&maxAsk=100&query=high%20top%20table&srchType=T&format=rss',
	'pingpong.rss': 'http://boston.craigslist.org/search/sss?catAbb=sss&hasPic=1&maxAsk=50&query=ping%20pong&s=0&format=rss',
}

# Disabled searches, kept for reference:
#	'freeShelf.rss': 'http://boston.craigslist.org/search/sss?hasPic=1&maxAsk=15&query=shelf&srchType=A&format=rss'
#	'patio.rss': 'http://boston.craigslist.org/search/sss?hasPic=1&maxAsk=150&query=patio&srchType=A&format=rss'
#	'outdoorTable.rss': 'http://boston.craigslist.org/search/sss?hasPic=1&maxAsk=150&query=outdoor%20table&srchType=T&format=rss'
#	'freePiano.rss': 'http://boston.craigslist.org/search/sso/gbs?hasPic=1&maxAsk=30&query=free%20piano&srchType=T&format=rss'
#	'night_stand.rss': 'http://boston.craigslist.org/search/sss?catAbb=sss&hasPic=1&query=night%20stand&s=0&format=rss'
#	'side_table.rss': 'http://boston.craigslist.org/search/sss/gbs?catAbb=sss&hasPic=1&query=side%20table&s=0&format=rss'

# Pull every feed down into its local cache file.
for feed_file, feed_url in view_list.items():
	read_rss_feed(feed_url, feed_file)


fin = open('listing.html','w')
html = '<html>\n'
html += '<head>\n'
html += '<script type="text/javascript">\n'

for i ,j in view_list.items():
	try:
		print i, j
		html += add_to_list(i, j)
	except Exception, e:
		print e

html += '</script>\n'
html += '</head>\n'

html += '<body>\n'

for i ,j in view_list.items():
	item_name = i.replace('.rss','')
	html += '\t<a href="javascript:' + item_name + '()">' + item_name + '</a><br>\n'


html += '\t<hr>\n'
html += '\t<div id=content></div>\n'
html += '</body></html>'
html = unicode(html).encode("utf-8")

fin.write(html)
fin.close()





#d = {'juliusctw@gmail.com':'chieh', 'kathiaek@gmail.com':'kathia'}
#for i, j in d.items():
#	mail(i, 'hello ' + j, ' here is the new craigslist', ['./listing.html'])






