# This script can be run directly from the command line.
# It splits a data set into two data sets after a certain number of posts have been read.

import sys
import io
import re
import shlex
import Parser
import Post
import FileIO

# The split threshold is read from the project config file.
from Resources.config import SPLIT_AT
	
def main():
	fileStream =FileIO.openDataSetFile(FileIO.READ_MODE)
	
	#Comment the following line out if you are reading a file that does not contain a header.
	Parser.readHeader(fileStream)
	
	postCount = 0
	trainingFileStream = FileIO.openTrainingFile(FileIO.WRITE_MODE)
	testingFileStream = FileIO.openTestingFile(FileIO.WRITE_MODE)
	
	#Loop over every post in the file
	while (not Parser.isEndOfFile(fileStream)):
		newPost = Parser.readPost(fileStream)
		
		#Used to measure progress
		print str(postCount) + "   " + newPost.postID
		
		if(postCount<=SPLIT_AT):
			trainingFileStream.write(newPost.toLine())
		else:
			testingFileStream.write(newPost.toLine())
		postCount = postCount + 1
		
	trainingFileStream.close()
	testingFileStream.close()
	print "DONE!"
	
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
	main()