
# coding=utf-8
import numpy as np
import os
import random

from dataRequire import *
def loadDataSet():
    """Return a toy corpus of tokenized posts and their labels.

    Returns:
        (posts, labels): posts is a list of token lists, and labels[i]
        is 1 when posts[i] is abusive, 0 when it is not.
    """
    posts = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    labels = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return posts, labels

def getVocabulary(dataSet):
    """Collect every distinct word appearing in *dataSet*.

    dataSet: iterable of token lists (one list per document).
    Returns the unique words as a list; ordering follows set iteration
    order, exactly as in the original loop-built version.
    """
    return list({token for document in dataSet for token in document})

def wordsExistVec(total , inputList):
    """Build a set-of-words presence vector for one document.

    total: vocabulary list; defines one output slot per word.
    inputList: the tokens of a single document.
    Returns a numpy float array v with v[i] == 1.0 iff total[i] occurs
    in inputList, else 0.0.

    Improvement over the original: inputList is turned into a set once,
    so each membership test is O(1) instead of a linear scan of
    inputList for every vocabulary word.
    """
    present = set(inputList)  # hoisted: one-time set build, O(1) lookups below
    words = np.zeros(len(total))
    for i, token in enumerate(total):
        if token in present:
            words[i] = 1
    return words
# trainData is the tokenized training data (postingList from loadDataSet())
# vocabulary is the list of all distinct words; classVector gives each post's class
# NOTE(review): this 3-argument definition is shadowed by the 4-argument
# `trainDataset` defined later in this module, so as the file stands it is
# dead code -- any call to trainDataset resolves to the later version.
def trainDataset(trainData , vocabulary , classVector):
    """Train a two-class naive Bayes model over set-of-words vectors.

    trainData: list of token lists, one per post.
    vocabulary: list of all distinct words (fixes the vector positions).
    classVector: 0/1 label for each post.
    Returns (p0, vector0pro, vector1pro): per-class log word-frequency
    vectors, plus p0 -- which, despite the name, is the fraction of
    class-1 labels (sum(classVector)/len(classVector)).
    """
    row = len(trainData)
    wordsVector = []
    for i in range(row):
        wordsVector.append(wordsExistVec(vocabulary , trainData[i]))

    # Laplace-style smoothing: per-word counts start at 1 (np.ones) and
    # the per-class totals start at 2 so no word gets probability zero.
    vector0pro = np.ones(len(vocabulary)) ; vector1pro = np.ones(len(vocabulary))
    class0Cnt = 2; class1Cnt = 2
    for i in range(row):
        if classVector[i]==0:
            vector0pro += wordsVector[i]
            class0Cnt += sum(wordsVector[i])
        else:
            vector1pro += wordsVector[i]
            class1Cnt += sum(wordsVector[i])

    # Work in log space so products of many small per-word factors can
    # later be accumulated as sums without floating-point underflow.
    vector0pro /= float(class0Cnt)
    vector1pro /= float(class1Cnt)
    vector0pro = np.log(vector0pro)
    vector1pro = np.log(vector1pro)
    # NOTE(review): p0 is really P(class == 1), not P(class == 0).
    p0 = sum(classVector) / float(len(classVector))
    return p0,vector0pro,vector1pro


# testData是要检测分类的数据,这个函数只能是处理二项分类
def classify(p0, vector0pro, vector1pro, vocabulary, testData):
    wordsVector = wordsExistVec(vocabulary , testData)
    # wordsVector
    for i in range(len(wordsVector)):
        vector0pro[i]*=wordsVector[i]
        vector1pro[i]*=wordsVector[i]
    pro0 = sum(vector0pro)+np.log(p0)
    pro1 = sum(vector1pro)+np.log(p0)
    print "probality: ",pro0,pro1
    if pro0 > pro1:
        return 0
    else:
        return 1


# Convert a sequence of answer strings into a numeric sequence.
def features2value(features):
    """Map each answer in *features* onto 0, 1 or 2.

    Position i becomes 0 when it equals choices[i][0], 1 when it equals
    choices[i][1], and 2 otherwise.  Relies on the module-level
    `choices` table loaded via dataRequire.
    """
    personalFeature = []
    for index, answer in enumerate(features):
        options = choices[index]
        if answer == options[0]:
            value = 0
        elif answer == options[1]:
            value = 1
        else:
            value = 2
        personalFeature.append(value)
    return personalFeature


# 去噪,对于有些人他会为了方便只选择其中某一项快速解决问题,或者是它所选择的条目没有完全,
# 又或者是有些人故意只选择其中某一项导致获取到的数据信息不准确,我们在这里假定80%以上的条目都选择了同一列就视为噪音
# 这些信息都应该视作噪音被删除
# 最后结果返回所有可用数据的数值矩阵(包含0,1,2),以及非噪声用户的标号vectorId
def denoising(datasets):
    newDatasets = []
    vectorId = []
    id = 0
    for data in datasets:
        id+=1
        if len(data)<len(items):
            print "编号为",id," 的数据被去噪: " , data
            continue
        perData = []
        cnt = [0,0,0]
        for i in range(len(items)):
            if data[i] == choices[i][0]:
                perData.append(0)
                cnt[0]+=1
            elif data[i] == choices[i][1]:
                perData.append(1)
                cnt[1]+=1
            else:
                perData.append(2)
                cnt[2]+=1

        if cnt[0]*1.25>float(len(items)) or cnt[1]*1.25>float(len(items)) or cnt[1]*1.25>float(len(items)):
            print "编号为",id," 的数据被去噪: " , data
            continue
        newDatasets.append(perData)
        vectorId.append(id)

    return newDatasets,vectorId

# trainDataMat is the numeric training matrix; classVector labels each
# row (0 = not friends, 1 = friends); users gives the user id behind
# each row of trainDataMat; user is the id whose relations we model,
# and that user's own row is skipped during training.
def trainDataset(trainDataMat, classVector, users ,user=0):
    """Train the two-class naive Bayes model on numeric feature rows.

    Rows whose user id equals *user* are excluded from the counting.
    Returns (p0, vector0pro, vector1pro): the per-class log frequency
    vectors plus p0 -- which, despite the name, is the fraction of
    class-1 labels, computed over ALL rows including the skipped one.
    """
    featureCount = len(trainDataMat[0])
    # Laplace-style smoothing: counts start at 1 and class totals at 2
    # so no feature ends up with zero probability.
    vector0pro = np.ones(featureCount)
    vector1pro = np.ones(featureCount)
    class0Cnt = 2
    class1Cnt = 2
    for rowIdx, rowVec in enumerate(trainDataMat):
        if users[rowIdx] == user:
            continue  # never learn a user's relations from his own row
        if classVector[rowIdx] == 0:
            vector0pro += rowVec
            class0Cnt += sum(rowVec)
        else:
            vector1pro += rowVec
            class1Cnt += sum(rowVec)

    # Log space keeps later score sums safe from underflow.
    vector0pro /= float(class0Cnt)
    vector1pro /= float(class1Cnt)
    vector0pro = np.log(vector0pro)
    vector1pro = np.log(vector1pro)
    p0 = sum(classVector) / float(len(classVector))
    return p0, vector0pro, vector1pro

# vectorPro has one row per class (two rows here: binary classification);
# vectorUser is the user's data converted to a numeric vector.
# Returns the winning class index and the score vector over all classes.
def bayesClassifyFriend(p0 , vectorPro , vectorUser):
    """Score *vectorUser* against every class row of *vectorPro*.

    Each class score is the dot product of the user vector with that
    class's log-frequency row, plus log(p0).  Returns the index of the
    highest-scoring class and the list of all class scores.
    """
    proVec = []
    bestIndex = -1
    bestScore = -1000000000.0
    for classIdx in range(len(vectorPro)):
        score = 0
        for featIdx in range(len(vectorUser)):
            score += vectorUser[featIdx] * vectorPro[classIdx][featIdx]
        score += np.log(p0)
        if score > bestScore:
            bestIndex = classIdx
            bestScore = score
        proVec.append(score)
    return bestIndex, proVec

# Classify the data of one test-set file.
def checkSingleDataClassify(p0, vectorPro, path='testData/test1.txt'):
    """Read one test file and classify the answers on its second line.

    The first line of the file is skipped as a header; the second line
    holds tab-separated answers, converted via features2value.
    Returns (classIndex, scoreList) from bayesClassifyFriend.

    Fix over the original: the file is opened in a `with` block, so the
    handle is closed even when reading or classification raises
    (previously fp.close() was skipped on any exception).
    """
    with open(path) as fp:
        fp.readline()  # discard the header line
        features = fp.readline()
    # NOTE(review): the trailing newline is not stripped, so the last
    # answer never equals choices[i][0/1] and always maps to 2 --
    # preserved as-is because changing it would alter classification.
    features = features.split('\t')
    features = features2value(features)
    classifyRes,probalities = bayesClassifyFriend(p0,vectorPro,features)
    return classifyRes,probalities

# 将测试集中的所有数据进行分类
def checkDatasetClassify(p0, vectorPro, dir='testData/',pre='test', tail='.txt'):
    # 判断文件目录是否存在
    if not os.path.exists(dir):
        print '你查找的文件目录'+dir+'不存在'

    classifyResults = []
    probalities = []
    index = 1
    while os.path.exists(dir+pre+str(index)+tail):
        classifyRes,probality = checkSingleDataClassify(p0,vectorPro,dir+pre+str(index)+tail)
        classifyResults.append(classifyRes)
        probalities.append(probality)
        index += 1
    print "测试集合一共有",index-1,"组数据"

    return classifyResults,probalities


# Random sampling, used later for cross-validation across several runs.
# Draw randomCnt distinct rows out of dataSetMat (vectorId holds the
# user id behind each row) and return the sampled rows with their ids.
def randomGetDataset(dataSetMat, vectorId, randomCnt):
    """Sample *randomCnt* distinct rows, without replacement.

    Returns (retMat, retId): the sampled rows and their matching user
    ids, index-aligned and in the sampled order.
    Raises ValueError when randomCnt exceeds the number of rows.

    Fix over the original: the hand-rolled rejection loop retried
    blindly on duplicate indices and spun FOREVER whenever
    randomCnt > len(dataSetMat).  random.sample performs the same
    without-replacement draw in one call and fails fast on an
    oversized request instead of hanging.
    """
    chosen = random.sample(range(len(dataSetMat)), randomCnt)
    retMat = [dataSetMat[j] for j in chosen]
    retId = [vectorId[j] for j in chosen]
    return retMat, retId



# 下方是整个基本贝叶斯跑出来的流程
def baseRunning():
    loadItemsChoices()
    datasets = getDataset()
    newDatasets,vectorId = denoising(datasets)

    randomDataset,randomId = randomGetDataset(newDatasets,vectorId,1000)

    # 我们要处理的mainUser这个用户的朋友关系
    mainUser = 1
    vectorFriend = getFriendVector(mainUser, randomId)

    print "debug---------------------"
    print vectorFriend
    p0, vector0pro, vector1pro = trainDataset(randomDataset, vectorFriend, randomId, mainUser)
    vectorPro = []
    vectorPro.append(vector0pro)
    vectorPro.append(vector1pro)
    classifyResults,b = checkDatasetClassify(p0, vectorPro)
    print sum(classifyResults)

# Below: the naive Bayes result computed with repeated random sub-sampling.
def randomRunning(randomCnt , rate=2):
    """Run the pipeline once on *randomCnt* rows, then 10 more times on
    randomCnt/rate rows each, intending to accumulate score vectors for
    a cross-validated vote.

    NOTE(review): several points look buggy and deserve a second look:
      * `sumProbilities += probilities` CONCATENATES Python lists; it
        does not add the scores element-wise, so sumProbilities only
        grows longer;
      * the final vote loop reads `probilities` (the last round only),
        so the accumulated sumProbilities is never actually used;
      * appending 0 when val[0] < val[1] is inverted relative to
        classify()/bayesClassifyFriend(), which pick the LARGER score;
      * `randomCnt/rate` is integer (floor) division under Python 2.
    """
    loadItemsChoices()
    datasets = getDataset()
    newDatasets,vectorId = denoising(datasets)
    # The variables below record the running total of the Bayes scores,
    # meant to be compared after dividing by the number of rounds.


    randomDataset,randomId = randomGetDataset(newDatasets,vectorId,randomCnt)

    # We model the friend relations of this mainUser.
    mainUser = 1
    vectorFriend = getFriendVector(mainUser, randomId)
    print vectorFriend

    print "debug---------------------"
    print vectorFriend
    p0, vector0pro, vector1pro = trainDataset(randomDataset, vectorFriend, randomId, mainUser)
    vectorPro = []
    vectorPro.append(vector0pro)
    vectorPro.append(vector1pro)
   # checkSingleDataClassify(p0 , vectorPro , 'testData/test5.txt')
    classifyResults,probilities = checkDatasetClassify(p0, vectorPro)
    sumProbilities = probilities
    # Baseline class-1 count from the full-size sample, used below to
    # reject pathological sub-samples.
    standardCnt = sum(classifyResults)
    print "standard ",standardCnt
    for i in range(10):
        randomDataset,randomId = randomGetDataset(newDatasets,vectorId,randomCnt/rate)

        # We model the friend relations of this mainUser.
        mainUser = 1
        vectorFriend = getFriendVector(mainUser, randomId)
        print vectorFriend

        print "debug---------------------"
        print vectorFriend
        p0, vector0pro, vector1pro = trainDataset(randomDataset, vectorFriend, randomId, mainUser)
        vectorPro = []
        vectorPro.append(vector0pro)
        vectorPro.append(vector1pro)
       # checkSingleDataClassify(p0 , vectorPro , 'testData/test5.txt')
        classifyResults,probilities = checkDatasetClassify(p0, vectorPro)

        # Guard against a random draw that hits a pathological sample
        # and deviates wildly from the baseline (difference > 25).
        if standardCnt-sum(classifyResults)>25 or sum(classifyResults)-standardCnt>25:
            continue
        # NOTE(review): list concatenation, not element-wise addition.
        sumProbilities += probilities

    classifyResults = []
    # NOTE(review): votes are taken from the LAST round's scores, and
    # the comparison direction disagrees with bayesClassifyFriend.
    for val in probilities:
        if val[0] < val[1]:
            classifyResults.append(0)
        else:
            classifyResults.append(1)
    print classifyResults
    print sum(classifyResults)

randomRunning(1000,2)