#!/bin/bash
# End-to-end driver: compute post (discussion) similarity via TF-IDF.
# Usage: dis_related.sh type [startId]
#   type    - post category: birthclub, forum, group, local
#   startId - optional starting document id (default 1)
#
# Accept 1 or 2 arguments. The original test was `-ne 1`, which exited
# for two arguments and made the startId branch below unreachable.
if [ $# -lt 1 ] || [ $# -gt 2 ]; then
    echo "usage: dis_related.sh type(帖子类型:birthclub, forum, group, local) [startId]"
    exit 1
fi
dis_type=$1
if [ $# -eq 2 ]; then
    startId=$2
else
    startId=1
fi

# Host/account used for the (optional) remote SVD step.
svdServer=172.16.10.10
svdUser=lvhongliang
# Locations of the helper tool directories, relative to this script.
preProcessPath="../preProcess"
postProcessPath="../postProcess"
getSimPath="../getsim"
splitWordPath="../splitWord"
linkAnalysePath="../link_analyse"
reverseIndexPath="../reverseIndex"
# Root directory that holds all per-type working data.
dataDir="/data/lvhongliang/data/discussion"
# type:  birthclub - same-birth-month circle
#        forum     - big circle
#        group     - small circle
#        local     - same-city circle
#1. create directory
echo "step 1: create directory ......"
curDataDir="$dataDir/$dis_type"
echo "$curDataDir"
# mkdir -p is a no-op when the directory already exists, so the
# separate `[ ! -d ]` guard the original used is unnecessary, and
# checking mkdir's status directly avoids the vacuous `$?` test.
if ! mkdir -p "$curDataDir"; then
    echo "error"
    exit 1
fi
#2. keep only the first three tab-separated columns of the filtered dump
echo "step 2: generate source file ......"
totalFile="$curDataDir/discussion"
echo "$totalFile"
if [ ! -f "$totalFile" ]; then
    # NOTE(review): ageId is passed to awk but never referenced in the
    # program — presumably a leftover from an earlier filtering step;
    # kept for now to avoid changing behavior. TODO confirm and remove.
    awk -v ageId="$dis_type" 'BEGIN{FS="\t";OFS="\t"}{print $1,$2,$3}' "$totalFile.filter" > "$totalFile"
fi
#3. word segmentation
echo "step 3: split the word!"
# Skip if the title split output already exists. The status check is
# moved inside the guard: in the original, when the file existed, the
# `$?` test examined the `if` statement itself (always 0), not the tool.
if [ ! -f "$totalFile.title.split" ]; then
    "$preProcessPath/splitQuestionsIKA.sh" "$totalFile"
    if [ $? -ne 0 ]; then
        echo "error"
        exit 1
    fi
fi

#4. 统计每个词出现的次数
echo "step 4: stat the term count  of every question!"
if [ ! -f $totalFile".stat" ]; then
    php $preProcessPath/statWords.php $totalFile".title.split" $totalFile".content.split" $totalFile".stat" 10
fi
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi

#5. 统计所有的词频
echo "step 5: stat count of every term!"
termStatFile=$curDataDir"/term_stat.txt"
if [ ! -f $termStatFile ];then
    $preProcessPath/statTermFrequency.sh $totalFile".stat" "\t" > $termStatFile
fi
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi

#6. 挑选词频大于5的词
echo "step 6: select the terms which's count is great than 5!"
awk '{if($2>5)print $1}' $termStatFile > $curDataDir"/selectedTerms.txt"
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi

#7.将term，documentId映射成连续化的id
echo "step 7: make the termId and docId sequential"
termMapFile=$curDataDir"/term_id.map"
docMapFile=$curDataDir"/doc_id.map"
$preProcessPath/mapTermToId.sh  $curDataDir"/selectedTerms.txt"  $termMapFile
$preProcessPath/mapTermToId.sh  $totalFile".stat"  $docMapFile
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi

# Number of documents = number of lines in the doc id map.
docNum=$(wc -l "$docMapFile" | awk '{print $1}')

#8. convert the term-document matrix to a termId-docId matrix
echo "step 8: map the term-dcoment matrix to termId-docId matrix"
# Status check moved inside the guard (same vacuous-check fix as step 3).
if [ ! -f "$curDataDir/mappedDocTerm.txt" ]; then
    "$preProcessPath/transferId.sh" "$totalFile.stat" "$curDataDir/mappedDocTerm.txt" "$termMapFile" "$docMapFile"
    if [ $? -ne 0 ]; then
        echo "error"
        exit 1
    fi
fi

#9. compute the tf-idf weight for every (doc, term) pair
echo "step 9: caculate the tf idf value"
tfidfFile="$curDataDir/doc_term_tf_idf.txt"
# Status check moved inside the guard (same vacuous-check fix as step 3).
if [ ! -f "$tfidfFile" ]; then
    php "$preProcessPath/convertFrequencyToTFIDF.php" "$curDataDir/mappedDocTerm.txt" "$tfidfFile"
    # TODO(review): the original comment asks to verify that the tf-idf
    # file and the frequency file have the same number of lines, but
    # only the exit status is actually checked here.
    if [ $? -ne 0 ]; then
        echo "error"
        exit 1
    fi
fi

#10. convert to the format redsvd expects, then factorize the matrix
latentVectorFile="$curDataDir/doc_latent_vectors.txt"
if [ ! -f "$curDataDir/good.U" ]; then
    echo "step 10: change the format of the matrix suitable for the redsvd"
    matrixRedSvd="$curDataDir/matrix_redsvd.txt"
    echo "$tfidfFile"
    "$preProcessPath/changeMatrixFormatV1.sh" "$tfidfFile" > "$matrixRedSvd"
    # NOTE(review): the matrix is still copied to the SVD host even
    # though factorization now runs locally — presumably for backup;
    # confirm whether this scp is still needed.
    scp "$matrixRedSvd" "$svdUser@$svdServer":
    if [ $? -ne 0 ] ; then
        echo "error"
        exit 1
    fi

    #11. matrix factorization: rank-150 sparse SVD via redsvd.
    # (An earlier, now-disabled variant ran redsvd remotely over ssh on
    # $svdServer and scp'd good.U back.)
    echo "step 11: do the matrix factorization"
    # Check the factorization result; the original left redsvd and the
    # mv unchecked, so later steps could run on a stale/missing good.U.
    "$preProcessPath/redsvd" -i "$matrixRedSvd" -o good -r 150 -f sparse
    if [ $? -ne 0 ] ; then
        echo "error"
        exit 1
    fi
    mv "good.U" "$curDataDir/good.U"
fi
if [ ! -f "$latentVectorFile" ]; then
    # Normalize the latent vectors before clustering.
    "$preProcessPath/standardFactors.sh" "$curDataDir/good.U" > "$latentVectorFile"
    if [ $? -ne 0 ] ; then
        echo "error"
        exit 1
    fi
fi

#12. cluster documents by their latent vectors
echo "step 12: cluster the documents by their latent vectors!"
clusterMap="$curDataDir/assignCid.txt"
clusterSet="$curDataDir/clusters.txt"
# Status check moved inside the guard (same vacuous-check fix as step 3).
if [ ! -f "$clusterMap" ]; then
    "$getSimPath/clusterByLsa.sh" "$latentVectorFile" "$clusterMap" "$clusterSet"
    if [ $? -ne 0 ]; then
        echo "error"
        exit 1
    fi
fi

#13. compute similarities; store them in the database and in files.
# NOTE(review): this section depends on side effects of sourcing
# genGetSimConfig.sh (it at least defines writeSimConfig, used below)
# and on the configFile/docSimFile variables being set BEFORE each
# `source` — kept byte-identical; verify before restructuring.
#(1) prepare the config file
echo "step 13: calculate the similarity and store to file and database"
echo "step 13.1 create and truncate table"
# Create (if missing) and empty the per-type similarity table.
# NOTE(review): credentials are hard-coded on the command line and
# visible in `ps`; consider moving them to a mysql option file.
mysql -h 172.16.9.36 -u babytree -pbabytree discussion -e "create table sim_tfidf_$dis_type like sim_tfidf;"
mysql -h 172.16.9.36 -u babytree -pbabytree discussion -e "truncate table sim_tfidf_$dis_type;"
# First configuration: latent-vector based similarity.
configFile=$getSimPath"/config/"$dis_type"_config_latent_savesimdb.txt"
docSimFile=$curDataDir"/doc_sim.txt"
# Build the reverse map (internal id -> original docId).
id_docMapFile=$curDataDir"/id_doc.map"
awk '{print $2,$1}' $docMapFile > $id_docMapFile
source $getSimPath/genGetSimConfig.sh

# Second configuration: tf-idf based similarity (the one actually
# computed below).
configFile=$getSimPath"/config/"$dis_type"_config_tfidf_savesimdb.txt"
docSimFile=$curDataDir"/doc_sim_tfidf.txt"
# Rebuild the id -> docId map (same output path as above).
id_docMapFile=$curDataDir"/id_doc.map"
awk '{print $2,$1}' $docMapFile > $id_docMapFile

# Build the reverse index over the titles and store it under
# $collectionName.
echo "get reverse Index:"
# NOTE(review): unlike docNum (L103), termNum keeps wc's full
# "count filename" output — confirm getReverseIndexAndStore -n
# tolerates the trailing filename.
termNum=`wc -l $termMapFile`
collectionName="discussion."$dis_type
$preProcessPath/getTitleTermMapedRet.sh $dis_type "discussion"  "discussion" # prepare the data
$reverseIndexPath/getReverseIndexAndStore -i $curDataDir"/mappedDocTerm.title.txt" -n  $termNum -c $collectionName
source $getSimPath/genGetSimConfig.sh
if [ ! -f $docSimFile ];then
     writeSimConfig $configFile $docNum $tfidfFile $docSimFile  $startId $docNum $clusterMap $clusterSet "sim_tfidf_"$dis_type $id_docMapFile $collectionName
    echo "***********begin calculating the tfidf similarity !*******"
    #(2) run the similarity computation
    $getSimPath/getSim_tfidf -c $configFile #1>$curDataDir"/1_getSim.txt" 2>$curDataDir"/2_getSim.txt"
    if [ $? -ne 0 ] ; then
        echo "error"
        exit 1
    fi
fi
#14. count the in-links of every document
echo "step 14: state the inlink of every document"
# Strip the weight column from doc_sim_tfidf.txt; check this call too
# (the original only examined the status of stat_in_link.sh).
"$linkAnalysePath/deleteWeight.sh" "$docSimFile" > "$curDataDir/doc_sim_tfidf.trim"
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi
"$linkAnalysePath/stat_in_link.sh" "$curDataDir/doc_sim_tfidf.trim" > "$curDataDir/tfidf_inlink.txt"
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi

#15. down-weight by in-link count and produce the final result
echo "step 15: lower by inlink and get the final result"
inlinkConfigFile="$getSimPath/config/${dis_type}_config_updateNeigh_tfidf.txt"
# writeInlinkLowConfig is defined by the genGetSimConfig.sh sourced in
# step 13.
writeInlinkLowConfig "$inlinkConfigFile" "$docNum" "$curDataDir/tfidf_inlink.txt" "$curDataDir/doc_sim_tfidf.trim" 10 "sim_tfidf_$dis_type";
"$getSimPath/updateNeighbor" -c "$inlinkConfigFile" 1>"$curDataDir/1_update_inlink.txt" 2>"$curDataDir/2_update_inlink.txt"
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi

#16. restore the mapped ids back to the original document ids
echo "step 16: restore the mappedId"
# Fix: the original wrote "{$dis_type}", which expands $dis_type but
# keeps the braces as literal characters in the filename; ${dis_type}
# matches the brace-less "sim_tfidf_$dis_type" naming used everywhere
# else in this script.
inlinkOriFile="$curDataDir/sim_tfidf_${dis_type}_step_9.ori"
"$postProcessPath/restoreToOriginalDocId.sh" "$docSimFile" "$docSimFile.ori" "$docMapFile"
"$postProcessPath/restoreToOriginalDocId.sh" "$curDataDir/sim_tfidf_${dis_type}_step_9" "$inlinkOriFile" "$docMapFile"
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi
#17. strip the weight column from the restored files
"$linkAnalysePath/deleteWeight.sh" "$docSimFile.ori" > "$docSimFile.ori.trim"
"$linkAnalysePath/deleteWeight.sh" "$inlinkOriFile" > "$inlinkOriFile.trim"

#18. compress the results for transfer
bzip2 "$docSimFile.ori.trim"
bzip2 "$inlinkOriFile.trim"

#19. copy the compressed result to s1
echo "step 19: scp the *.ori.trim to s1"
scp "$inlinkOriFile.trim.bz2" lvhongliang@s1:related_data_full/
if [ $? -ne 0 ] ; then
    echo "error"
    exit 1
fi

#20. decompress on s1
echo "step 20: unzip the *.ori.trim.bz2 to *.ori.trim on s1"
# Fix: use only the basename on the remote side. $inlinkOriFile is a
# full local path (/data/...), which does not exist under
# ~/related_data_full/ on s1 — the scp above copies just the basename.
remoteFile="${inlinkOriFile##*/}"
ssh lvhongliang@s1 "bunzip2 ~/related_data_full/${remoteFile}.trim.bz2"

echo "Have finished all the steps, enjoy it!"
exit 0