#!/usr/bin/python
# -*- coding: utf-8 -*-

import datetime
import os
import subprocess

import feedparser
 
class Collector:
    """Collect blog entries from RSS/Atom feeds and write them into one
    directory per category, as raw training data for a text classifier.

    A feed is described by a "source record" (see openxmlsrc): a list of
    tokens whose first element is the feed URL/path and whose last element
    is the author name.
    """

    # Tag-normalization rules: (lower-case needle, canonical category).
    # Applied IN ORDER to the *current* tag, so a later rule sees the result
    # of an earlier one -- this mirrors the original chain of independent
    # `if` statements exactly.
    _TAG_RULES = [
        ("film", "film-dizi"),        # merge film and film-tv into film-dizi
        ("sinema", "sinema-tiyatro"),
        ("firefox", "firefox"),
        ("google", "google"),
        ("ata", "atatürk"),
        ("sources.list", "debian"),
        ("ruby", "programlama"),
        ("linux", "linux"),
        ("debian", "debian"),
        ("seminer", "seminer"),
        ("php", "programlama"),
        ("python", "programlama"),
        ("mono", "programlama"),
        ("quake", "oyun"),
        ("hack", "geek"),
        ("gmail", "google"),
        ("wesnoth", "oyun"),
        ("gülelim", "geyik"),
        ("fıkra", "geyik"),
        ("gezegen", "gezegen"),
        ("kde", "masaüstü"),
        ("gnome", "masaüstü"),
        ("xfce", "masaüstü"),
        ("desktop", "masaüstü"),
        ("gnu", "masaüstü"),
        ("vista", "microsoft"),
        ("movie", "film-dizi"),
        ("blog", "günlüksel"),
        # BUGFIX: needle lower-cased -- the original compared "site Öneri"
        # against a lower-cased tag, so the rule could never fire.
        ("site öneri", "güzel site"),
        ("hayat", "yaşam"),
        ("web2", "web-2-3"),
        ("web 2", "web-2-3"),
        ("web3", "web-2-3"),
        ("web 3", "web-2-3"),
        ("ajax", "web-2-3"),
        ("plugin", "eklenti"),
        ("superman", "sinema-tiyatro"),
        ("gunluk", "günlüksel"),
        # BUGFIX: needle lower-cased (was "Çizgi roman", unreachable).
        ("çizgi roman", "çizgi roman"),
        ("acik kaynak", "açık kaynak"),
    ]

    @staticmethod
    def _normalize_tag(tag):
        """Map a raw feed tag to its canonical category name.

        The first '/' in the tag is replaced by '-' so the tag cannot be
        mistaken for a path when the category directory is created.
        """
        slash = tag.find("/")
        if slash != -1:
            tag = tag[:slash] + "-" + tag[slash + 1:]
        if tag.lower() == "muzik":
            tag = "müzik"  # collect all music-related tags in one dir
        for needle, category in Collector._TAG_RULES:
            if needle in tag.lower():
                tag = category
        return tag

    def parser(self, xmlsrclst, update=0):
        """Parse feed entries into flat records.

        Parameters
        ----------
        xmlsrclst : list
            When ``update == 0``: a source record whose first element is
            the feed URL/path and whose last element is the author name.
            Otherwise: a pre-fetched list of feedparser entries.
            NOTE(review): in the ``update != 0`` mode the author is still
            taken from ``xmlsrclst[-1]``, i.e. the *last entry object* --
            looks dubious, but preserved; confirm against callers.

        Returns
        -------
        list of ``[content, updated, title, author, link, tags]`` lists.
        Only tagged entries are kept (training needs labelled data);
        after the training phase every entry should be classified instead.
        """
        if update == 0:
            xmlsrc = xmlsrclst[0]
            print(xmlsrc)
            entries = feedparser.parse(xmlsrc).entries
        else:
            entries = xmlsrclst

        resultlist = []
        for item in entries:
            # Collect only tagged entries.
            if "tags" not in item:
                continue
            categories = [t["term"] for t in item.tags]

            if "content" in item:
                body = item.content[0].value
            elif "summary_detail" in item:
                body = item.summary_detail.value
            elif "summary" in item:
                body = item.summary.value
            else:
                # BUGFIX: the original skipped the append here, producing a
                # 5-element record that shifted every index createfiles()
                # relies on.
                body = ""

            resultlist.append([
                body,
                getattr(item, "updated", None),
                getattr(item, "title", None),
                xmlsrclst[-1],              # author from the source record
                getattr(item, "link", None),
                categories,
            ])
        return resultlist

    def createfiles(self, resultlist, check=0):
        """Write every parsed record into ``./dirs/<category>/<filename>``.

        The file name is the entry link with '/' replaced by '--'; when
        ``check != 0`` the suffix "girdi" is appended so updated entries do
        not overwrite the originals.  File layout (one field per line):
        author, title, updated timestamp, content.
        """
        try:
            os.mkdir("dirs")
        except OSError:
            pass  # directory already exists
        os.chdir("dirs")

        for record in resultlist:
            for raw_tag in record[-1]:
                category = self._normalize_tag(raw_tag).lower()
                try:
                    os.mkdir(category)
                except OSError:
                    pass  # directory already exists
                os.chdir(category)

                # NOTE(review): record[-2] (the link) may be None for odd
                # feeds; that would raise here, exactly as the original did.
                filename = record[-2].replace("/", "--")
                if check != 0:
                    filename += "girdi"

                # Do not mutate the caller's record; use local fallbacks.
                author = record[-3] if record[-3] is not None else "İsimsiz Yazar"
                title = record[-4] if record[-4] is not None else "Belirtilmemiş Başlık"

                with open(filename, "w") as f:
                    f.write(author + "\n")
                    f.write(title + "\n")
                    # BUGFIX: a missing `updated` timestamp is None; the
                    # original crashed on f.write(None).
                    f.write((record[-5] or "") + "\n")
                    f.write(record[0] + "\n")
                os.chdir("..")
        os.chdir("..")

    def openxmlsrc(self, filename):
        """Read the feed source list.

        Each line is space-separated tokens; the dashes in the last token
        (the author name) are turned back into spaces.  Returns a list of
        token lists.
        """
        rslt = []
        with open(filename, "r") as f:       # BUGFIX: handle now closed
            for line in f:
                tokens = [tok.strip() for tok in line.split(" ")]
                tokens[-1] = " ".join(tokens[-1].split("-"))
                rslt.append(tokens)
        return rslt

    def checker(self, xmlsrclst):
        """Return True when the feed has new content.

        Compares the newest entry of the current feed (``xmlsrclst[2]``)
        against the cached copy (``xmlsrclst[0]``); re-edited entries with
        the same date and link are not interesting.
        """
        print(xmlsrclst)

        newest_remote = feedparser.parse(xmlsrclst[2]).entries[0]
        newest_cached = feedparser.parse(xmlsrclst[0]).entries[0]

        same = (newest_remote.updated == newest_cached.updated
                and newest_remote.link == newest_cached.link)
        return not same

    def update(self, xmlsrclst):
        """Fetch the current feed and return only the entries that are
        newer than the newest cached one.

        The cached feed file is renamed with a month:day-hour:minute suffix
        and the current feed is downloaded next to it, so old entries are
        kept as well.
        """
        new_entries = []

        cached = feedparser.parse(xmlsrclst[0]).entries
        last_seen = cached[0].updated

        for item in feedparser.parse(xmlsrclst[2]).entries:
            if item.updated == last_seen:
                break
            new_entries.append(item)

        # xmlsrclst[0] is "<prefix><dirname><"/"><file>"; recover the parts.
        parts = xmlsrclst[0].split(xmlsrclst[1])
        basename = parts[1].split("/")[1]

        os.chdir(parts[0] + xmlsrclst[1])
        now = datetime.datetime.now()
        suffix = "-%d:%d-%d:%d" % (now.month, now.day, now.hour, now.minute)
        # BUGFIX: list-form subprocess.run instead of os.popen shell strings
        # (no shell injection via file names, and we wait for completion).
        subprocess.run(["mv", basename, basename + suffix])
        subprocess.run(["wget", xmlsrclst[2]])

        os.chdir("..")
        os.chdir("..")

        return new_entries
# TODO: report the number of files under each category, to check whether there is enough data for training.
				
if __name__ == "__main__":

    import collectorentry

    collector = collectorentry.Collector()
    # Each record is [feed-url, directory, author]; see openxmlsrc().
    for source in collector.openxmlsrc("xml-resources"):

        # A trailing newline in xml-resources yields an empty record; skip it.
        if source == ['']:
            continue

        parsed = collector.parser(source)
        collector.createfiles(parsed, 1)
	
