# -*- coding: utf-8 -*-
import post
import nntpyenc
import yaml
import filesplit
import par
import os
import os.path
import re
from nzb import NZB
from post import postpart, genmid, gensubject
from mmap import mmap, ACCESS_READ
from math import ceil
from random import SystemRandom
from threading import Thread
from Queue import Queue, Empty
from time import time
# Module-level CSPRNG. Not referenced anywhere in this file — presumably
# imported by a sibling module (e.g. for message-id generation in post.py).
# NOTE(review): confirm an external user exists before removing.
rand = SystemRandom()

class ParThread(Thread):
	"""Daemon thread that creates par2 recovery volumes in the background.

	Expects exactly one job on its queue: a kwargs dict suitable for
	par.create_recovery_volumes. The producer later blocks on
	queue.join() to wait for volume creation to finish.
	"""
	def __init__(self, queue):
		Thread.__init__(self)
		self.daemon = True
		self.queue = queue
	
	def run(self):
		job = self.queue.get(True)
		try:
			par.create_recovery_volumes(**job)
		finally:
			# Always mark the job done, even if par2 creation raised —
			# previously an exception here skipped task_done() and left
			# the producer's parQueue.join() hanging forever.
			self.queue.task_done()

class PostThread(Thread):
	def __init__(self, queue, queue2):
		Thread.__init__(self)
		self.daemon = True
		self.queue = queue
		self.queue2 = queue2
		self.current_queue = 'queue'
		self.items_processed = 0
		self.please_die = False
	
	def run(self):
		while True:
			postargs = (None, None)
			try:
				if self.items_processed == 0:
					postargs = self.queue.get(True)
				else:
					postargs = self.queue.get(True, 20)
				self.items_processed += 1
			except Empty:
				if self.current_queue != 'queue2':
					print 'Queue get timed out. Switching to retry queue.'
					self.queue = self.queue2
					self.current_queue = 'queue2'
				else:
					print 'Retry queue timed out.'
				if self.please_die:
					print 'Thread dead.'
					return
				continue
			try:
				print 'Posting %s (%d/%d).' % (postargs['fname'], postargs['partno'], postargs['parts'])
				reply = postpart(**postargs)
				if reply.split(' ')[0] != '240':
					raise Exception(reply)
			except BaseException as e:
				print e
				print 'Exception while posting %s (%d/%d). Requeueing.' % (postargs['fname'], postargs['partno'], postargs['parts'])
				self.queue.task_done()
				self.queue2.put(postargs)
				print 'Requeued %s (%d/%d).' % (postargs['fname'], postargs['partno'], postargs['parts'])
			else:
				print '%s (%d/%d) posted successfully.' % (postargs['fname'], postargs['partno'], postargs['parts'])
				self.queue.task_done()

def post_single_set(threads, cfginfo, postinfo):
	filelist = postinfo['files']
	del postinfo['files']
	postoptions = {}
	try: 
		for i in postinfo['options']:
			postoptions[i.keys()[0]] = i.values()[0]
		del postinfo['options']
	except KeyError:
		pass
	queue = Queue(threads + 1)
	queue2 = Queue(9001)
	
	post_size = cfginfo['chars_per_line'] * cfginfo['lines_per_part']
	
	# option: basepath
	os.chdir(postoptions.get('basepath', '.'))
	
	# start worker threads
	postThreads = []
	for i in xrange(threads):
		a = PostThread(queue, queue2)
		a.start()
		postThreads.append(a)
	
	files = 0
	
	# pre-queue processing: split hueg files implicitly
	# suggestion 1: 655360000 = 625 MiB = lcm(1 MiB, 5000*128) # each file will be an exact multiple of a binary megabyte
	# suggestion 1.5: 1350 MiB = 655360000*2 = 1310720000
	# suggestion 2: 104960000 ~= 100 MiB = 164*5000*128
	# suggestion 3: 25000 parts (think it's the max per file) = 16000000000 bytes
	# 0 = don't split, ever (not recommended)
	filelist2 = []
	for i in filelist:
		filelist2 += filesplit.getbounds(i, postoptions.get('split', 16000000000))
	# number of files being posted (different from # of files on disk if split)
	split_files = len(filelist2)
	files += split_files

	# Find the longest common prefix for all files, and check if it's the entire filename without extension or part### stuff for ALL files.
	# If there's only a single file to post, keep the extension of the original file in the basename for nzb & par2. Otherwise usenet-indexers
	# don't group the file + par2's + nzb in a single collection.
	if (files == 1):
		gen_base_name = os.path.basename(filelist[0])
	else:
		basenames = map(os.path.basename, filelist)
		basenames = [os.path.splitext(basename)[0] for basename in basenames]
		basenames = [re.sub("\.part\d+$", '', basename) for basename in basenames]
		gen_base_name = os.path.commonprefix(basenames)
		for basename in basenames:
			if gen_base_name != basename:
				gen_base_name = postinfo['comment']
				break

	# pre-queue processing: par2 generation
	par_redundancy = postoptions.get('par_redundancy', -1)
	par_name = ''
	par_files = 0
	par_parms = None
	parQueue = Queue(1)
	if par_redundancy != -1: # -1 means don't even do the verification file
		par_name = postoptions.get('par_name', gen_base_name + '.par2')
		par_parms = par.prepare_recovery_volumes(filelist, par_name, par_redundancy, post_size * 2) # TODO: allow config of this
		par_files = 1 + par_parms['r_file_count']
		files += par_files
		# start it in the background
		parQueue.put(par_parms)
		ParThread(parQueue).start()
	
	# pre-queue processing: nzb preparation (part 1)
	nzbobj = NZB(cfginfo['h_from'], postinfo['newsgroups'].split(',')) # easier to just build it in memory then chuck it away than "if"s everywhere
	nzb_files = 0
	nzb_name = postoptions.get('nzb_name', gen_base_name + '.nzb')
	if not postoptions.get('no_nzb', 0): # not no nzb :DDDD
		nzb_files = 1
		files += nzb_files
	
	# queue main files
	postinfo['files'] = files
	for j in xrange(split_files):
		thisfile = filelist2[j]
		fname = thisfile['name']
		
		print 'Started processing %s.' % fname
		
		# f = open(thisfile['path'], 'rb')
		# data1 = mmap(f.fileno(), 0, access=ACCESS_READ)
		
		f = thisfile['f']
		data1 = thisfile['m']
		data = data1[thisfile['begin']:thisfile['end']]
		
		fsize = len(data)
		# print 'fsize: %d' % fsize
		fileno = j + 1
		# files =
		parts = (fsize + post_size - 1) / post_size # ensure round up without having to go floating point
		fileinfo = {'fname': fname, 'fileno': fileno, 'fsize': fsize, 'parts': parts}
		nzbobj.add_file(gensubject(postinfo['comment'], fileno, files, fname, 1, parts))
		for i in xrange(parts):
			partinfo = {}
			partinfo.update(cfginfo)
			partinfo.update(postinfo)
			partinfo.update(fileinfo)
			begin = post_size * i
			end = post_size * (i + 1)
			if end > fsize:
				end = fsize
			partdata = data[begin:end]
			mid = genmid(i + 1, parts)
			nzbobj.add_segment(len(partdata), i + 1, mid)
			assert len(partdata) == end - begin
			if len(partdata) % post_size != 0:
				assert len(partdata) == fsize % post_size
			partinfo.update({'data': partdata, 'partno': i + 1, 'mid': mid})
			#args = (server, port, un, pwd, h_from, h_sender, newsgroups, comment, fileno, files, fname, i + 1, parts, genmid(i + 1, parts), chars_per_line, lines_per_part, fsize, partdata)
			print 'Queueing %s (%d/%d).' % (partinfo['fname'], partinfo['partno'], partinfo['parts'])
			queue.put(partinfo)
			print 'Queued %s (%d/%d).' % (partinfo['fname'], partinfo['partno'], partinfo['parts'])
		nzbobj.finished_file()
		# data1.close()
		# f.close()
	
	
	# post-queue processing: posting par2 files. use queue2 in case poster threads have already switched
	if par_redundancy != -1:
		# first make sure they're actually done creating
		parQueue.join()
		par_dir = par_parms['par_dir']
		filelist3 = os.listdir(par_dir)
		filelist3.sort()
		assert len(filelist3) == par_files
		for i in xrange(par_files):
			fileno_offset = split_files
			fileno = fileno_offset + i + 1
			fname = filelist3[i]
			f = open(os.path.join(par_dir, fname), 'rb')
			data = mmap(f.fileno(), 0, access=ACCESS_READ)
			fsize = len(data)
			parts = (fsize + post_size - 1) / post_size # ensure round up without having to go floating point
			fileinfo = {'fname': fname, 'fileno': fileno, 'fsize': fsize, 'parts': parts}
			nzbobj.add_file(gensubject(postinfo['comment'], fileno, files, fname, 1, parts))
			for i in xrange(parts):
				partinfo = {}
				partinfo.update(cfginfo)
				partinfo.update(postinfo)
				partinfo.update(fileinfo)
				begin = post_size * i
				end = post_size * (i + 1)
				if end > fsize:
					end = fsize
				partdata = data[begin:end]
				mid = genmid(i + 1, parts)
				nzbobj.add_segment(len(partdata), i + 1, mid)
				assert len(partdata) == end - begin
				if len(partdata) % post_size != 0:
					assert len(partdata) == fsize % post_size
				partinfo.update({'data': partdata, 'partno': i + 1, 'mid': mid})
				print 'Queueing %s (%d/%d).' % (partinfo['fname'], partinfo['partno'], partinfo['parts'])
				queue2.put(partinfo)
				print 'Queued %s (%d/%d).' % (partinfo['fname'], partinfo['partno'], partinfo['parts'])
			nzbobj.finished_file()
			data.close()
			f.close()
			
	# post-queue processing: saving + posting nzb file
	if nzb_files:
		fileno_offset = split_files + par_files
		fileno = fileno_offset + 1
		fname = filesplit.name_from_path(nzb_name)
		data = str(nzbobj)
		fsize = len(data)
		with open(nzb_name, 'wb') as f:
			f.write(data)
		parts = (fsize + post_size - 1) / post_size # ensure round up without having to go floating point
		fileinfo = {'fname': fname, 'fileno': fileno, 'fsize': fsize, 'parts': parts}
		for i in xrange(parts):
			partinfo = {}
			partinfo.update(cfginfo)
			partinfo.update(postinfo)
			partinfo.update(fileinfo)
			begin = post_size * i
			end = post_size * (i + 1)
			if end > fsize:
				end = fsize
			partdata = data[begin:end]
			mid = genmid(i + 1, parts)
			assert len(partdata) == end - begin
			if len(partdata) % post_size != 0:
				assert len(partdata) == fsize % post_size
			partinfo.update({'data': partdata, 'partno': i + 1, 'mid': mid})
			print 'Queueing %s (%d/%d).' % (partinfo['fname'], partinfo['partno'], partinfo['parts'])
			queue2.put(partinfo)
			print 'Queued %s (%d/%d).' % (partinfo['fname'], partinfo['partno'], partinfo['parts'])
		
	
	print 'Joining...'
	queue.join()
	print 'Joining retry queue which has approximately %d parts.' % queue2.qsize()
	queue2.join()
	print 'Done! Killing threads.'
	for i in postThreads:
		i.please_die = True
	print 'Freeing mmaps and file handles'
	for i in filelist2:
		try:
			i['m'].close()
		except:
			pass
		try:
			i['f'].close()
		except:
			pass
			
def main(argv):
	starttime = time()
	# 0: prog name
	# 1: threads
	# 2: config info file
	# 3: post info file
	post_single_set(int(argv[1]), yaml.load(open(argv[2], 'rb').read()), yaml.load(open(argv[3], 'rb').read()))
	
	
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
	import sys
	main(sys.argv)