# -*- coding:utf-8 -*-
import  os
import cv2
import numpy as np
import pandas as pd
from collections import Counter
import jieba
import math


class textPreDeal:
    """Text-categorization preprocessing pipeline.

    Segments raw text with jieba, filters stop words, computes tf-idf
    features per document and emits an SVM-ready numeric matrix.
    """

    def __init__(self):
        # Category labels: news / porn / reactionary.
        self.catlabel = ['zixun', 'seqing', 'fadong']
        self.wordbycat = {}
        # Replaced by a pandas DataFrame once prefiles()/fenciofcat() runs.
        self.dataframe = 0
        self.featurewords = {}
        self.catpathDict = {}
        self.catpathDict['stop_path'] = ''
        self.catpathDict['ad_src'] = '/work/scrapy/data/textcat/ad_src'
        self.catpathDict['ad_src_train_path'] = os.path.join(self.catpathDict['ad_src'], 'train.csv')
        # BUG FIX: original read self.catpathDict['ad_scr'] (typo) -> KeyError,
        # and then immediately clobbered the value with '' on the next line.
        self.catpathDict['ad_src_test_path'] = os.path.join(self.catpathDict['ad_src'], 'test.csv')
        self.catpathDict['ad_model'] = '/work/scrapy/data/textcat/ad_model'
        self.catpathDict['ad_src_svm'] = '/work/scrapy/data/textcat/ad_src_svm'
        self.stwlist = self.createstoplist(self.catpathDict['stop_path'])

    # Term-frequency count for the current document.
    def freqword(self, linelist):
        """Return {term: occurrence count} for the tokens in *linelist*."""
        return dict(Counter(linelist))

    def tf_idf(self, linelist):
        """Compute tf-idf for each distinct term of one document.

        Returns a list of (term, weight) pairs sorted by weight, descending.
        Requires self.dataframe to hold the corpus (column 'fenci').
        """
        outdic = {}
        dic = self.freqword(linelist)
        ndocs = len(self.dataframe)
        ntokens = len(linelist)
        # Iterate distinct terms once instead of once per token occurrence.
        for term, freq in dic.items():
            tf = freq / ntokens
            docfreq = self.wordindoccount(term, self.dataframe)
            # Guard against ZeroDivisionError for terms absent from the corpus.
            idf = math.log(ndocs / docfreq) if docfreq else 0.0
            outdic[term] = tf * idf
        # BUG FIX: dict.iteritems() is Python 2 only -> items().
        return sorted(outdic.items(), key=lambda d: d[1], reverse=True)

    def buildsvm(self):
        """Write a numeric feature matrix (one column per feature word) to svm_pre.csv."""
        self.featurewords = self.featureword()
        ncols = len(self.featurewords)
        # BUG FIX: original passed a comprehension over an int (len(...)) to
        # DataFrame -> TypeError; build an empty frame with numeric columns.
        df = pd.DataFrame(columns=range(ncols))
        # BUG FIX: iterating a DataFrame yields column names; use iterrows().
        for index, row in self.dataframe.iterrows():
            featureline = row['tezheng']
            # Start each row zero-filled, then set the tf-idf of present terms.
            values = [0] * ncols
            # BUG FIX: original indexed the list with its own string elements.
            for pair in featureline.split(','):
                term, _, weight = pair.partition(':')
                if term in self.featurewords:
                    values[self.featurewords[term]] = weight
            df.loc[index] = values
        df.to_csv('svm_pre.csv')

    def getcorpbycat(self, labels, reader):
        """Group segmented-word lists by category label.

        Returns {label: [token lists]} plus an 'all' entry covering every doc.
        """
        wordbycat = {}
        for label in labels:
            # BUG FIX: original stored into a throwaway dict `word` and
            # returned the never-populated wordbycat (always empty).
            # NOTE(review): selecting rows by the 'label' column looks like the
            # intent (original compared 'fenci' to the label) — confirm schema.
            wordbycat[label] = [row.split(',')
                                for row in reader.loc[reader['label'] == label, 'fenci']]
        wordbycat['all'] = [row.split(',') for row in reader['fenci']]
        return wordbycat

    # Number of documents that contain the given word.
    def wordindoccount(self, word, dataframe):
        """Count rows of *dataframe* whose 'fenci' token list contains *word*."""
        count = 0
        # BUG FIX: iteritems() iterates columns (and is Python 2); iterrows()
        # yields one row per document.
        for index, row in dataframe.iterrows():
            if word in row['fenci'].split(','):
                count += 1
        return count

    def createstoplist(self, stopwordspath):
        """Load the stop-word list, one word per line.

        An empty/falsy path yields an empty list instead of an open() error.
        """
        if not stopwordspath:
            return []
        # Context manager closes the file (original leaked the handle).
        with open(stopwordspath, 'r', encoding='utf-8') as f:
            return [line.strip() for line in f]

    def prefiles(self):
        """Run the full preprocessing pipeline and cache its results."""
        # Segmentation (preprocessing).
        # BUG FIX: original discarded the returned frame, leaving
        # self.dataframe unset for every later step.
        self.dataframe = self.fenciofcat()
        # Extract the bag of feature words.
        self.featurewords = self.featureword()

    def featureword(self):
        """Map every distinct feature word to a stable column index."""
        words = self.dataframe['tezheng']
        # BUG FIX: original put unhashable lists into a set and then indexed a
        # list with word strings -> TypeError. Flatten tokens, then enumerate.
        wordsset = set()
        for line in words:
            wordsset.update(line.split(','))
        return {word: i for i, word in enumerate(wordsset)}

    def fenciofcat(self, filename='train.csv'):
        """Segment each row's text, drop stop words and store tf-idf features.

        Returns the loaded DataFrame with 'fenci' and 'tezheng' filled in.
        """
        # BUG FIX: original joined the builtin `type` into the path.
        typepath = os.path.join(self.catpathDict['ad_src'], filename)
        dataheader = ['text', 'label', 'fenci', 'tezheng']
        reader = pd.read_csv(typepath, sep=',', header=0, names=dataheader,
                             skip_blank_lines=True)
        # tf_idf()/wordindoccount() read the corpus from self.dataframe.
        self.dataframe = reader
        # BUG FIX: iteritems() -> iterrows(); writes must go through .at
        # because the iterrows() row is a copy.
        for index, row in reader.iterrows():
            # Segment the line; tokens are comma-separated.
            fenci = ",".join(jieba.cut(row['text']))
            # Filter stop words (original called stopwd() with no arguments).
            fenci = self.stopwd(fenci)
            reader.at[index, 'fenci'] = fenci
            linelist = fenci.split(',')
            temp = self.tf_idf(linelist)
            # BUG FIX: original joined `tezheng` before it existed; serialize
            # the (term, weight) pairs as "term:weight,...".
            tezheng = ",".join("%s:%s" % (term, weight) for term, weight in temp)
            reader.at[index, 'tezheng'] = tezheng
        return reader

    def stopwd(self, inputline, outputline=None):
        """Remove stop-listed terms from a comma-joined token string.

        *outputline* is kept (with a default) for backward compatibility; the
        original never used it either.
        """
        kept = [term for term in inputline.split(',') if term not in self.stwlist]
        return ",".join(kept)

if __name__ == '__main__':
    # BUG FIX: original wrote `textPreDeal;` without parentheses, binding the
    # class object itself instead of an instance, so prefiles() ran as an
    # unbound call and failed. Also guard the script entry point so importing
    # this module does not trigger file I/O.
    textpre = textPreDeal()
    textpre.prefiles()