#! /usr/bin/env python

# Created by J. Hao @ ETS, Dec. 2013

# this code grab the log files from the ftp server and check length, completion

import glob as gl, pandas as pd, numpy as np, collections as cl, pylab as pl 
import rpy2.robjects as robjects,xml.etree.ElementTree as ET
import os
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
from datetime import datetime
import datetime as dt
from rpy2.robjects.packages import importr
from matplotlib import colors 
from matplotlib.dates import date2num, num2date, drange
import pandas.rpy.common as com
#from nltk import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
import nltk


#psychometric = importr('psychometric')
#mirt = importr('mirt')
#eRm = importr('eRm')
#can't find where're the packages, need to ask Jiangang

# answer key for the 40 science multiple-choice items (responses coded 1-4);
# indexed 0-39, aligned with spreadsheet columns 31-70 used below
scienceMCkey=np.array([2,4,4,1,2,2,4,3,1,1,4,3,1,4,3,1,3,1,1,4,4,2,3,3,4,3,4,2,2,3,1,3,3,1,3,4,2,3,2,3])


def logicalOR(ll):
    '''
    Element-wise logical OR across all entries of a list.

    Parameters
    ----------
    ll : list
        Non-empty list of numpy boolean arrays (or values broadcastable
        against each other).

    Returns
    -------
    The element-wise OR folded over every entry of ``ll``.

    Raises
    ------
    SystemExit
        If ``ll`` is not a list.
    '''
    # NOTE(fix): ``sys`` was never imported at module level, so the
    # original raised NameError instead of exiting; import it locally.
    import sys
    if not isinstance(ll, list):
        sys.exit('--the argument must be a list')
    acc = ll[0]
    for item in ll[1:]:
        acc = np.logical_or(acc, item)
    return acc


def getAmtID_List(logdir=None, success=None):
    '''
    Count the unique amtIDs that worked on the task based on the log files.

    Scans ``logdir`` for session logs, keeps one log per player pair (the
    file whose first amtID sorts before the second counts; the partner's
    copy is skipped as a duplicate), plots daily and cumulative
    participation histograms, and writes the table to an .xls file.

    Parameters
    ----------
    logdir : str or None
        Directory holding the log files (must end with '/'). Defaults to
        the current working directory.
    success : any or None
        If None, all '*CPSSingle.xml' logs are used (output 'amtID.xls'
        and 'participant_freq_all.png'); otherwise only '*Success.xml'
        logs ('amtID_success.xls' / 'participant_freq_success.png').

    Returns
    -------
    pandas.DataFrame with columns ['amtID', 'attempt', 'Time'].
    '''
    if logdir is None:
        logdir = os.getcwd() + '/'
    # NOTE(fix): the original assigned the undefined name ``logFileDir``
    # whenever a directory was passed in, raising NameError; the
    # caller-supplied logdir is now used as-is.
    if success is None:
        logFname = gl.glob(logdir + '*CPSSingle.xml')
    else:
        logFname = gl.glob(logdir + '*Success.xml')
    logFname.sort()
    AmtID = []
    Time = []
    # if id1<id2 the log counts; if id1>id2 it is the partner's duplicate copy
    for fname in logFname:
        fname = fname.split('/')[-1].split('_')
        if fname[0].strip().upper() < fname[1].strip().upper():
            pass
        else:
            continue  # second player's file: skip without recording
        AmtID.append(fname[0].strip().upper())
        # timestamps come in two formats, with and without an AM/PM part
        if fname[4] == 'AM' or fname[4] == 'PM':
            Time.append(datetime.strptime(fname[2]+'-'+fname[3]+'-'+fname[4],'%m-%d-%Y-%H-%M-%S-%p'))
        else:
            Time.append(datetime.strptime(fname[2]+'-'+fname[3],'%m-%d-%Y-%H-%M-%S'))
    freq = cl.Counter(AmtID)
    unqAmtID = freq.keys()
    attempt = freq.values()
    unqTime = []
    for amtid in unqAmtID:
        unqTime.append(Time[AmtID.index(amtid)])
    data = pd.DataFrame(np.array([unqAmtID,attempt,unqTime]).T, columns=['amtID','attempt','Time'])
    # date range revised for the new tetralogue data collection
    dateRange = pd.date_range('12/26/2013','06/06/2014')
    dateRangeStr = [str(day.date()) for day in dateRange]
    numtime = date2num(data.Time)
    pl.figure(figsize=(15,9))
    pl.subplot(1,2,1)
    his = pl.hist(numtime,bins=date2num(dateRange),alpha=0.3)
    pl.xticks(his[1],dateRangeStr,rotation=45)
    pl.ylabel('number of participants per day')
    pl.grid()
    pl.subplot(1,2,2)
    his = pl.hist(numtime,bins=date2num(dateRange),cumulative=True,alpha=0.3)
    pl.xticks(his[1],dateRangeStr,rotation=45)
    pl.ylabel('Cumulative number of participants')
    pl.grid()
    if success is None:
        data.to_excel('amtID.xls',index=False)
        pl.figtext(0.5,0.95,'All Logs: '+str(len(data)))
        pl.savefig('participant_freq_all.png')
    else:
        data.to_excel('amtID_success.xls',index=False)
        pl.figtext(0.5,0.95,'Successful Logs: '+str(len(data)))
        pl.savefig('participant_freq_success.png')
    return data

def getTagValue(root):
    '''
    Return the text content of every direct child of an XML element.

    Parameters
    ----------
    root : xml.etree.ElementTree.Element (or any iterable of elements)

    Returns
    -------
    list -- ``child.text`` for each direct child, in document order
    (entries may be None for empty elements).
    '''
    # NOTE: the original computed len(root) into an unused variable; dropped.
    return [child.text for child in root]

def xml2DataFrame(xmlfile):
    '''
    convert the xml file to a dataframe

    Parses one session log XML file into one row per logged action,
    indexed by the action timestamp, with columns: playerID (the pair ID
    built from both players' amtIDs), sessionTime (seconds since the first
    action), actionName, actionBy, actionTo, actionResult, slideName and
    talkAllowed. talkAllowed is 1 for actions logged between
    'Begin Discussion' and 'Begin Team Response', 0 otherwise.
    '''
    tree = ET.parse(xmlfile)
    root = tree.getroot()
    #dtfmt='%m/%d/%Y %H:%M:%S %p'
    #dtfmt='%m/%d/%Y %H:%M:%S'
    #amtid = xmlfile.split('/')[-1].split('_')[0]
    # the pair ID concatenates the two amtIDs from the filename in a
    # canonical (descending) order so both players' files map to one pair.
    # NOTE(review): unlike getAmtID_List, the IDs are NOT upper-cased
    # before comparing -- confirm case cannot differ between the files.
    amtid1= xmlfile.split('/')[-1].split('_')[0]#new
    amtid2= xmlfile.split('/')[-1].split('_')[1]#new
    if amtid1>amtid2:#new
        pairID=amtid1+amtid2#new
    else:#new
        pairID=amtid2+amtid1#new

    actionName=[]
    actionTime =[]
    actionBy=[]
    actionTo=[]
    actionResult=[]
    playerID=[]
    slideName=[]
    talkAllowed=[]#to mark relevant vs. irrelevant discussion (chatMessage)
    talkOK=0#talkOK=1 when chatMessage happens after 'Begin Discussion' before 'Begin Team Response'
    # root[1] holds the action records; its first child is skipped
    actionRoot = root[1][1:]
    for child in actionRoot:
        value = getTagValue(child)
       # if value[0]!='chatMessage':
       #     continue
        actionName.append(value[0])
        if value[0]=='Begin Discussion':
            talkOK=1
        elif value[0]=='Begin Team Response':
            talkOK=0
        #if judgment: chatMessage is relevant between the above two actionName's

        # timestamps occur in two formats; try the plain one first
        try:
            actionTime.append(datetime.strptime(value[1],'%m/%d/%Y %H:%M:%S'))
#value[1] is supposed to be the date&time, check; confirmed
        except ValueError:
            actionTime.append(datetime.strptime(value[1],'%m/%d/%Y %H:%M:%S %p'))#this condition is true in the current tetralogue files
        actionBy.append(value[2])
        actionTo.append(value[3])
        actionResult.append(value[4])
        slideName.append(value[5])
        talkAllowed.append(talkOK)
        playerID.append(pairID)

    actionTime = np.array(actionTime)
    # elapsed seconds since the first action of the session.
    # NOTE(review): timedelta.seconds wraps at 24h; .total_seconds() would
    # be safer if a session could span a day -- confirm session lengths.
    sessionTime = [(actionTime[i] - actionTime[0]).seconds for i in range(len(actionTime))]
    data = pd.DataFrame(np.array([playerID,sessionTime,actionName,actionBy,actionTo,actionResult,slideName,talkAllowed]).T,index=actionTime,columns=['playerID','sessionTime','actionName','actionBy','actionTo','actionResult','slideName','talkAllowed'])
    return data

def aggregrateXML(logdir=None, success=None):
    '''
    Aggregate the per-session xml log files into one DataFrame.

    Each player pair contributes exactly one log: the first file seen for
    a pairID is kept and the partner's duplicate copy is skipped.

    Parameters
    ----------
    logdir : str or None
        Directory holding the logs (must end with '/'); defaults to the
        current working directory.
    success : any or None
        If None, all '*CPSSingle.xml' logs are used and the result is
        written to 'aggregrated_log_all.xls'; otherwise only
        '*Success.xml' logs, written to 'aggregrated_log_Success.xls'.

    Returns
    -------
    pandas.DataFrame -- the concatenated logs (row index reset).
    '''
    if logdir is None:
        logdir = os.getcwd() + '/'
    # NOTE(fix): the original assigned the undefined name ``logFileDir``
    # when a directory was passed, raising NameError; the caller-supplied
    # logdir is now used as-is.
    if success is None:
        logFname = gl.glob(logdir + '*CPSSingle.xml')
    else:
        logFname = gl.glob(logdir + '*Success.xml')
    logFname.sort()
    data = []
    subjectPool = []  # pairIDs already aggregated (dedup across partners)
    for f in logFname:
        dataEach = xml2DataFrame(f)
        pairID = dataEach['playerID'][0]
        if pairID in subjectPool:
            continue
        subjectPool.append(pairID)
        data.append(dataEach)
    data = pd.concat(data, ignore_index=True)
    if success is None:
        data.to_excel('aggregrated_log_all.xls')
    else:
        data.to_excel('aggregrated_log_Success.xls')
    return data

def selectChatMessage(logdir=None, success=None):
    '''
    Select the chat messages between the two players from the log files.

    Same aggregation/deduplication as aggregrateXML, then keeps only rows
    whose actionName is 'ChatMessage' and strips everything up to the
    first ':' (the speaker prefix) from the message text.

    Parameters
    ----------
    logdir : str or None
        Directory holding the logs (must end with '/'); defaults to the
        current working directory.
    success : any or None
        If None, all '*CPSSingle.xml' logs are used; otherwise only
        '*Success.xml' logs.

    Returns
    -------
    pandas.DataFrame of chat-message rows.

    NOTE(review): the output filenames are identical to aggregrateXML's,
    so running both in one directory overwrites the aggregated log --
    confirm whether this should write to a different file.
    '''
    if logdir is None:
        logdir = os.getcwd() + '/'
    # NOTE(fix): the original assigned the undefined name ``logFileDir``
    # when a directory was passed, raising NameError; the caller-supplied
    # logdir is now used as-is.
    if success is None:
        logFname = gl.glob(logdir + '*CPSSingle.xml')
    else:
        logFname = gl.glob(logdir + '*Success.xml')
    logFname.sort()
    data = []
    subjectPool = []  # pairIDs already seen (dedup across partners)
    for f in logFname:
        dataEach = xml2DataFrame(f)
        pairID = dataEach['playerID'][0]
        if pairID in subjectPool:
            continue
        subjectPool.append(pairID)
        data.append(dataEach)
    data = pd.concat(data, ignore_index=True)
    # keep only chat lines; actionResult is 'speaker: message' -- keep the
    # part after the first ':' (these two lines distinguish this function
    # from aggregrateXML)
    data = data[data['actionName'] == 'ChatMessage']
    data['actionResult'] = data['actionResult'].apply(lambda x: pd.Series(x.split(':', 1)[1]))
    if success is None:
        data.to_excel('aggregrated_log_all.xls')
    else:
        data.to_excel('aggregrated_log_Success.xls')
    return data

def ngram(data):
    '''
    Unigram/bigram frequency analysis of the chat messages.

    Parameters
    ----------
    data : pandas.DataFrame
        Output of selectChatMessage(); the 'actionResult' column holds the
        chat message text.

    Returns
    -------
    pandas.DataFrame with columns ['item', 'frequency'], also written to
    'ngram_Result.xls'.
    '''
    n = 2  # maximum n-gram order (unigrams and bigrams)
    chatMessage = data['actionResult']
    # join all messages into one string; str.join avoids the quadratic
    # cost of the original repeated '+=' concatenation (the leading space
    # matches the original output exactly)
    strMsg = ' ' + ' '.join(chatMessage.iloc[i] for i in range(len(chatMessage)))
    ngram_vectorizer = CountVectorizer(ngram_range=(1, n), token_pattern=r'\b\w+\b')
    analyze = ngram_vectorizer.build_analyzer()
    grams = analyze(strMsg)
    fdist = nltk.FreqDist(grams)
    item = list(fdist.keys())
    freq = [fdist[gram] for gram in item]
    ngramRst = pd.DataFrame(np.array([item, freq]).T, columns=['item', 'frequency'])
    ngramRst.to_excel('ngram_Result.xls')
    return ngramRst



def binaryScore(responseMatrix, key):
    '''
    This function return the binary score for an item with many responses
    and the answer key to that item.

    Parameters
    ----------
    responseMatrix : 2-D numpy array, one row per observation, one column
        per item; columns beyond len(key) are ignored.
    key : sequence of correct answers, one per item.

    Returns
    -------
    Binary (np.intp) matrix, observations x items: 1 where the response
    equals the key entry for that column, 0 otherwise.
    '''
    nitem = len(key)
    # compare every scored column against its key entry in one broadcast
    # operation instead of looping over columns
    matches = responseMatrix[:, :nitem] == np.asarray(key)
    return np.intp(matches)
        
def itemExamine(score):
    '''
    This function return the item correlation, difficulty, discrimination. 
    The input score is a binary matrix/dataframe, rows are students, cols are items

    Returns a DataFrame whose columns take their names from elements 1-4
    of the R result object.

    NOTE(review): ``psychometric`` is presumably the R package normally
    bound via importr at the top of this file, but that import is
    commented out (L23-26), so calling this currently raises NameError
    until the package location is resolved.
    '''
    # call into R through rpy2; b behaves like an R named list
    b = psychometric.item_exam(score,discrim=True)
    colnames = [b.names[1],b.names[2],b.names[3],b.names[4]]
    data = pd.DataFrame(np.array([b[1],b[2],b[3],b[4]]).T,columns=colnames)
    return data


def makeDemographicPie(data):
    '''
    Save pie charts of the race and gender distribution of participants.

    Parameters
    ----------
    data : pandas.DataFrame
        Survey responses; columns 1-6 are summed as race indicators and
        column 7 is compared against 1 (male) / 2 (female). This fixed
        column layout is assumed from the callers -- confirm against the
        spreadsheet.

    Writes 'races_pie.png' and 'gender_pie.png' to the working directory.
    '''
    races = ['White','African American', 'Asian','American Indian','Hawaiian or Pacific Islander','Hispanic/Latino']
    # NOTE(fix): DataFrame.icol() was deprecated and later removed from
    # pandas; iloc[:, i] is the positional equivalent.
    white = data.iloc[:, 1].sum()
    black = data.iloc[:, 2].sum()
    asian = data.iloc[:, 3].sum()
    Amidian = data.iloc[:, 4].sum()
    hawaiian = data.iloc[:, 5].sum()
    hispanic = data.iloc[:, 6].sum()
    pl.pie([white,black,asian,Amidian,hawaiian,hispanic],labels=races,autopct='%.2f')
    pl.savefig('races_pie.png')
    pl.close()
    gender = ['Male','Female']
    male = np.intp(data.iloc[:, 7] == 1).sum()
    female = np.intp(data.iloc[:, 7] == 2).sum()
    pl.pie([male,female],labels=gender,autopct='%.2f')
    pl.savefig('gender_pie.png')
    pl.close()
    
def getMCscoreData():
    '''
    Score the science MC responses and write them to 'MC_score_amtID.csv'.

    Reads the raw survey spreadsheet from its hard-coded location, drops
    duplicate respondents (keeping the first occurrence), saves the
    demographic pie charts, scores columns 31-70 against scienceMCkey,
    and writes the binary score matrix with an upper-cased amtID column.
    '''
    filename = '/home/jghao/research/tetralogue/amtdata/mcitems/mc_result_all/Raw_Data_for_Science_MC.xlsx'
    xls = pd.ExcelFile(filename)
    data = xls.parse("Sheet1")
    # deduplicate on the first column (amtID), keeping the first row.
    # NOTE(fix): DataFrame.icol()/irow() were deprecated and later removed
    # from pandas; iloc is the positional equivalent.
    unqID, unqIdx = np.unique(data.iloc[:, 0], return_index=True)
    data = data.iloc[unqIdx]
    unqID = data.iloc[:, 0]
    amtID = [t.upper() for t in unqID]
    makeDemographicPie(data)
    idxSMCstart = 31  # first science-MC response column
    idxSMCend = 70    # last science-MC response column (inclusive)
    score = []
    for i in range(idxSMCstart, idxSMCend + 1):
        score.append(np.intp(data.iloc[:, i] == scienceMCkey[i - idxSMCstart]))
    score = np.array(score).T
    dfscore = pd.DataFrame(score)
    dfscore['amtID'] = amtID
    dfscore.to_csv('MC_score_amtID.csv', sep=',', index=False)

def mcItemAnalysis(filename=None):
    '''
    Item analysis of the science multiple-choice responses.

    Deduplicates respondents, scores spreadsheet columns 31-70 against
    scienceMCkey, then saves: demographic pie charts, a per-item
    probability-correct bar chart ('prob_correct.png'), heatmaps of the
    nominal responses ('response_distribution.png') and binary scores
    ('score_distribution.png'), classical item statistics
    ('itemProperty.xls'), and text dumps 'score.txt'/'response.txt' with
    the three attention-check items (0-based 9, 26, 38) excluded. Also
    prints an alpha statistic (presumably Cronbach's alpha) via the R
    ``psychometric`` package -- whose importr call is currently commented
    out at the top of the file, so that line raises NameError until fixed.

    Parameters
    ----------
    filename : str or None
        Path to the raw MC spreadsheet; defaults to the hard-coded
        project location.
    '''
    if filename == None:
        filename = '/home/jghao/research/tetralogue/amtdata/mcitems/mc_result_all/Raw_Data_for_Science_MC.xlsx'
    xls = pd.ExcelFile(filename)
    data = xls.parse("Sheet1")
    # drop duplicate respondents, keeping the first occurrence
    unqID,unqIdx = np.unique(data.icol(0), return_index=True) 
    data = data.irow(unqIdx)
    unqID = data.icol(0)
    makeDemographicPie(data)
    idxSMCstart = 31  # first MC response column
    idxSMCend = 70    # last MC response column (inclusive)
    std = []
    score = []
    for i in range(idxSMCstart,idxSMCend+1):
        std.append(np.std(data.icol(i)))
        score.append(np.intp(data.icol(i) == scienceMCkey[i-idxSMCstart]))
    score = np.array(score).T
    nitem = score.shape[1]
    nobs = score.shape[0]
    probCorrectAll =  score.sum()/float(nitem*nobs)
    probCorrectItem = score.sum(axis=0)/float(nobs)
    # bar chart of per-item proportion correct; stars mark attention checks
    pl.figure(figsize=(12,8))
    pl.bar(np.arange(nitem),probCorrectItem,alpha = 0.5)
    pl.ylim(-0.1,1.3)
    pl.grid(color='g')
    pl.plot(9.5,0.5,'r*')
    pl.plot(26.5,0.5,'r*',label='attention check')
    pl.plot(38.5,0.5,'r*')
    pl.ylabel('Prob. of correct response')
    pl.title('Prob. of correct for ALL: '+str(round(probCorrectAll,3)))
    pl.legend()
    pl.savefig('prob_correct.png')
    pl.close()
    # heatmap of the raw nominal (1-4) responses
    response = np.array(data.ix[:,idxSMCstart:idxSMCend+1])
    cmap=colors.ListedColormap(['red','orange','blue','green'])
    bounds=[0.5,1.5,2.5,3.5,4.5]
    norm = colors.BoundaryNorm(bounds,cmap.N)
    pl.figure(figsize=(10,15))
    img = pl.matshow(response,origin='lower',cmap=cmap,norm=norm,fignum=0)
    pl.colorbar(img, cmap=cmap, norm=norm, boundaries=bounds, ticks=[1,2,3,4])    
    pl.xticks(np.arange(nitem),(np.arange(nitem)+1).astype('str'))
    pl.yticks(np.arange(nobs),(np.arange(nobs)+1).astype('str'))
    pl.grid(color='c')
    pl.xlabel('item')
    pl.ylabel('# observations')
    pl.title('Norminal responses to items')
    pl.savefig('response_distribution.png')
    pl.close()  
    # heatmap of the binary scores (red = wrong, green = correct)
    cmap=colors.ListedColormap(['red','green'])
    bounds=[0,0.5,1.5]
    norm = colors.BoundaryNorm(bounds,cmap.N)
    pl.figure(figsize=(10,15))
    img = pl.matshow(score,origin='lower',cmap=cmap,norm=norm,fignum=0)
    pl.colorbar(img, cmap=cmap, norm=norm, boundaries=bounds, ticks=[0,1])    
    pl.xticks(np.arange(nitem),(np.arange(nitem)+1).astype('str'))
    pl.yticks(np.arange(nobs),(np.arange(nobs)+1).astype('str'))
    pl.grid(color='y')
    pl.xlabel('item')
    pl.ylabel('# observations')
    pl.title('Scores for item')
    pl.savefig('score_distribution.png')
    pl.close()
    # exclude the three attention-check items from the classical item
    # analysis and the text dumps
    itemIdx = np.arange(score.shape[1])
    itemIdxOK = (itemIdx != 9)*(itemIdx != 26)*(itemIdx != 38)
    da = itemExamine(score[:,itemIdxOK])
    da.to_excel('itemProperty.xls',float_format='%5.3f')
    print 'alpha = '+str(psychometric.alpha(score[:,itemIdxOK]))
    np.savetxt('score.txt',score[:,itemIdxOK],fmt='%d')
    np.savetxt('response.txt',response[:,itemIdxOK].astype('int'),fmt='%d')
    
    #---demographic info
    
    
def amtID_compare_mc_log():
    '''
    this compare the amtID from log file and from MC response

    Cross-checks which MC respondents have no session log (and vice
    versa), both for all logs and for successful logs only, writing four
    reports: 'amtid_no_log.xls', 'amtid_no_mc.xls',
    'amtid_no_log_success.xls' and 'amtid_no_mc_success.xls'. Finally
    plots the MC submit times of the respondents with no log at all.
    '''
    fileMC = '/home/jghao/research/tetralogue/amtdata/mcitems/mc_result_all/Raw_Data_for_Science_MC.xlsx' 
    fileLog = '/home/jghao/research/tetralogue/amtdata/logfiles/data_collection_all/SessionLogs/CPSSingle/amtID.xls'
    fileLogSuccess = '/home/jghao/research/tetralogue/amtdata/logfiles/data_collection_all/SessionLogs/CPSSingle/amtID_success.xls'
    xlsMC = pd.ExcelFile(fileMC)
    dataMC = xlsMC.parse("Sheet1",skiprows=15)
    # NOTE(fix): DataFrame.icol() was deprecated and later removed from
    # pandas; iloc[:, i] is the positional equivalent.
    amtID_mc = [tt.upper() for tt in dataMC.iloc[:, 0]]
    time_mc = list(dataMC.iloc[:, 71])  # MC submit timestamps, row-aligned with amtID_mc
    xlsLog = pd.ExcelFile(fileLog)
    xlsLogSuccess = pd.ExcelFile(fileLogSuccess)
    dataLog = xlsLog.parse("sheet1")
    dataLogSuccess = xlsLogSuccess.parse("sheet1")
    amtID_log = list(dataLog.iloc[:, 0])
    time_log = list(dataLog.iloc[:, 2])  # row-aligned with amtID_log
    amtID_logSuccess = list(dataLogSuccess.iloc[:, 0])
    # --- MC respondents with no log / log entries with no MC response ---
    amtID_noLog = np.setdiff1d(amtID_mc, amtID_log)
    time_noLog = [time_mc[amtID_mc.index(amtid)] for amtid in amtID_noLog]
    df = pd.DataFrame(np.array([amtID_noLog,time_noLog]).T)
    df.to_excel('amtid_no_log.xls')
    amtID_noMC = np.setdiff1d(amtID_log,amtID_mc)
    time_noMC = [time_log[amtID_log.index(amtid)] for amtid in amtID_noMC]
    dfmc = pd.DataFrame(np.array([amtID_noMC,time_noMC]).T)
    dfmc.to_excel('amtid_no_mc.xls')
    # --- same comparison against successful logs only ---
    amtID_noLogSuccess = np.setdiff1d(amtID_mc, amtID_logSuccess)
    time_noLogSuccess = [time_mc[amtID_mc.index(amtid)] for amtid in amtID_noLogSuccess]
    df = pd.DataFrame(np.array([amtID_noLogSuccess,time_noLogSuccess]).T)
    df.to_excel('amtid_no_log_success.xls')
    amtID_noMCSuccess = np.setdiff1d(amtID_logSuccess,amtID_mc)
    # NOTE(fix): the original indexed time_log with the ID's position in
    # amtID_logSuccess -- a different list -- so it fetched the wrong
    # row's timestamp; look the ID up in amtID_log, which time_log is
    # aligned with.
    time_noMCSuccess = [time_log[amtID_log.index(amtid)] for amtid in amtID_noMCSuccess]
    dfmc = pd.DataFrame(np.array([amtID_noMCSuccess,time_noMCSuccess]).T)
    dfmc.to_excel('amtid_no_mc_success.xls')
    pl.plot(time_noLog,np.ones(len(time_noLog)),'b|')
    

    
def aggregrate_unsuccessful_log():
    '''
    Aggregate the xml logs of participants without a successful session.

    amtIDs present in the all-logs workbook but absent from the
    successful-logs workbook are looked up as '<amtid>*.xml' in the
    current working directory; every matching file is parsed with
    xml2DataFrame and the concatenation is written to
    'aggregrated_log_fail.xls' and returned.

    NOTE(review): DataFrame.icol() is a long-deprecated pandas API
    (removed in modern pandas); this code requires the old pandas also
    implied by the pandas.rpy import at the top of the file.
    '''
    fileLog = '/home/jghao/research/tetralogue/amtdata/logfiles/data_collection_all/SessionLogs/CPSSingle/amtID.xls'
    fileLogSuccess = '/home/jghao/research/tetralogue/amtdata/logfiles/data_collection_all/SessionLogs/CPSSingle/amtID_success.xls'
    xlsLog = pd.ExcelFile(fileLog)
    xlsLogSuccess = pd.ExcelFile(fileLogSuccess)
    dataLog = xlsLog.parse("sheet1")
    dataLogSuccess = xlsLogSuccess.parse("sheet1")
    amtID_log = list(dataLog.icol(0))
    amtID_logSuccess = dataLogSuccess.icol(0)
    # IDs that appear in the all-logs list but never in the success list
    amtID_logFail = np.setdiff1d(amtID_log, amtID_logSuccess)
    data = []
    for amtid in amtID_logFail:
        logFname = gl.glob(amtid+'*.xml')
        print logFname
        if len(logFname) > 0:
            for f in logFname:
                data.append(xml2DataFrame(f))
        else:
            print '----with only success log ---', amtid
    data = pd.concat(data,ignore_index=True)
    data.to_excel('aggregrated_log_fail.xls')
    return data

def fitIRT(score_file=None):
    '''
    Load the binary score matrix in preparation for IRT model fitting.

    NOTE(review): this function is an unfinished stub -- it only loads
    the data; the actual IRT fit (presumably via one of the commented-out
    rpy2 packages at the top of the file) was never implemented.

    Parameters
    ----------
    score_file : str or None
        Path to the whitespace-delimited score matrix written by
        mcItemAnalysis; defaults to the original hard-coded location.

    Returns
    -------
    2-D numpy float array of scores.
    '''
    if score_file is None:
        fileDIR = '/home/jghao/research/tetralogue/amtdata/mcitems/mc_result_all/'
        score_file = fileDIR + 'score.txt'
    score = np.genfromtxt(score_file)
    return score
    
    
    
    
    
    
    
    
    
