import fasttext


# Baseline with no optimization at all — results are very poor:
# (('__label__baking',), array([0.05682746]))
# (('__label__baking',), array([0.06429368]))
# (3000, 0.13433333333333333, 0.058094277065013696)


# Also demonstrated below: automatic hyperparameter tuning (step 6) and the
# loss used in production for multi-label multi-class problems (step 7).

def unoptimized():
    """Baseline: train a supervised fastText model with all-default
    hyperparameters, print predictions for two sample queries, then
    print the (N, precision@1, recall@1) triple from the test set.
    """
    model = fasttext.train_supervised('./train.txt')

    sample_queries = (
        'which baking dish is best to bake a banana bread?',
        "Why not put knives in the dishwasher?",
    )
    for query in sample_queries:
        print(model.predict(query))

    print(model.test('./test.txt'))


def optimized_preprocess():
    """Step 1: train on data preprocessed with sed so every punctuation mark
    is surrounded by spaces (train1.txt/test1.txt), then print two sample
    predictions and the test-set (N, precision@1, recall@1) triple.
    """
    # Backslashes are doubled so Python no longer sees the invalid escape
    # sequences \( \! \) \1 (SyntaxWarning since 3.12); the printed text
    # is byte-for-byte identical to before.
    print('第1步，执行sed -e "s/\\([.\\!?,\'/()]\\)/ \\1 /g"，将标点符号前后都增加一个空格')
    model = fasttext.train_supervised('./train1.txt')

    answer = model.predict('which baking dish is best to bake a banana bread?')
    print(answer)

    answer = model.predict("Why not put knives in the dishwasher?")
    print(answer)
    result = model.test('./test1.txt')
    print(result)


def optimized_more_epoch():
    """Step 2: same preprocessed data, but raise the training epochs to 50.
    Prints two sample predictions followed by the test-set result triple.
    """
    print('第2步，增加训练轮次')
    model = fasttext.train_supervised('./train1.txt', epoch=50)

    for text in (
        'which baking dish is best to bake a banana bread?',
        "Why not put knives in the dishwasher?",
    ):
        print(model.predict(text))

    print(model.test('./test1.txt'))


def optimized_big_lr():
    """Step 3: on top of 50 epochs, bump the learning rate to 1.0.
    Prints two sample predictions followed by the test-set result triple.
    """
    print('第3步，调整学习率:')
    model = fasttext.train_supervised('./train1.txt', epoch=50, lr=1.0)

    baking_pred = model.predict('which baking dish is best to bake a banana bread?')
    print(baking_pred)

    knives_pred = model.predict("Why not put knives in the dishwasher?")
    print(knives_pred)

    print(model.test('./test1.txt'))


def optimized_ngram():
    """Step 4: additionally enable word bigram features (wordNgrams=2).
    Prints two sample predictions followed by the test-set result triple.
    """
    print('第4步，增加n-gram特征:')
    model = fasttext.train_supervised('./train1.txt', epoch=50, lr=1.0, wordNgrams=2)

    for sentence in [
        'which baking dish is best to bake a banana bread?',
        "Why not put knives in the dishwasher?",
    ]:
        print(model.predict(sentence))

    print(model.test('./test1.txt'))


def optimized_loss():
    """Step 5: switch the loss to hierarchical softmax ("hs") for much
    faster training. Prints two sample predictions and the test result.
    """
    print('第5步，修改损失计算方式:')
    model = fasttext.train_supervised('./train1.txt', epoch=50, lr=1.0, wordNgrams=2, loss="hs")

    prediction = model.predict('which baking dish is best to bake a banana bread?')
    print(prediction)

    prediction = model.predict("Why not put knives in the dishwasher?")
    print(prediction)

    test_result = model.test('./test1.txt')
    print(test_result)


def optimized_loss2():
    """Step 6: let fastText's autotune search hyperparameters for up to
    300 seconds, validating against test1.txt. Prints two sample
    predictions and the test-set result triple.
    """
    print('第6步，自动超参数调优')
    model = fasttext.train_supervised(
        './train1.txt',
        autotuneValidationFile='./test1.txt',
        autotuneDuration=300,
    )

    for query in (
        'which baking dish is best to bake a banana bread?',
        "Why not put knives in the dishwasher?",
    ):
        print(model.predict(query))

    print(model.test('./test1.txt'))


def optimized_loss3():
    """Step 7: one-vs-all ("ova") loss, the setup used in production for
    multi-label multi-class problems. Predictions request every label
    (k=-1) but keep only those scoring above the 0.5 threshold.
    """
    print('第7步，实际生产中多标签多分类问题的损失计算方式')
    model = fasttext.train_supervised('./train1.txt', lr=0.2, epoch=30, wordNgrams=2, loss='ova')

    multi_label = model.predict(
        'which baking dish is best to bake a banana bread?', k=-1, threshold=0.5
    )
    print(multi_label)

    multi_label = model.predict(
        "Why not put knives in the dishwasher?", k=-1, threshold=0.5
    )
    print(multi_label)

    print(model.test('./test1.txt'))


if __name__ == '__main__':
    # Uncomment exactly one of the step functions below to reproduce the
    # corresponding run; currently step 6 (autotune) is active.
    # unoptimized()

    print('开始优化.... ')
    # optimized_preprocess()
    # optimized_more_epoch()
    # optimized_big_lr()
    # optimized_ngram()
    # optimized_loss()
    optimized_loss2()
    # optimized_loss3()

"""
    最终结果： 
Read 0M words
Number of words:  14543
Number of labels: 735
Progress: 100.0% words/sec/thread:   25345 lr:  0.000000 avg.loss: 10.033611 ETA:   0h 0m 0s
(('__label__baking',), array([0.05641369]))
(('__label__baking',), array([0.06436238]))
(3000, 0.132, 0.05708519532939311)
开始优化.... 
第1步，执行sed -e "s/\([.\!?,'/()]\)/  /g"，将标点符号前后都增加一个空格
Read 0M words
Number of words:  8952
Number of labels: 735
Progress: 100.0% words/sec/thread:   26436 lr:  0.000000 avg.loss:  9.939075 ETA:   0h 0m 0s
(('__label__baking',), array([0.08209024]))
(('__label__food-safety',), array([0.10315481]))
(3000, 0.17766666666666667, 0.07683436644082456)
第2步，增加训练轮次
Read 0M words
Number of words:  8952
Number of labels: 735
Progress: 100.0% words/sec/thread:   28509 lr:  0.000000 avg.loss:  5.038415 ETA:   0h 0m 0s
(('__label__baking',), array([0.89016408]))
(('__label__equipment',), array([0.12254736]))
(3000, 0.5626666666666666, 0.24333285281822115)
第3步，调整学习率:
Read 0M words
Number of words:  8952
Number of labels: 735
Progress: 100.0% words/sec/thread:   28338 lr:  0.000000 avg.loss:  4.941858 ETA:   0h 0m 0s
(('__label__bananas',), array([0.83819145]))
(('__label__knives',), array([0.50235903]))
(3000, 0.5863333333333334, 0.2535678247080871)
第4步，增加n-gram特征:
Read 0M words
Number of words:  8952
Number of labels: 735
Progress: 100.0% words/sec/thread:   27080 lr:  0.000000 avg.loss:  2.551675 ETA:   0h 0m 0s
(('__label__bananas',), array([0.93062955]))
(('__label__knives',), array([0.42475852]))
(3000, 0.6333333333333333, 0.27389361395415884)
第5步，修改损失计算方式:
Read 0M words
Number of words:  8952
Number of labels: 735
Progress: 100.0% words/sec/thread:  616943 lr:  0.000000 avg.loss:  1.873325 ETA:   0h 0m 0s
(('__label__bananas',), array([0.69552433]))
(('__label__knives',), array([0.66583145]))
(3000, 0.5956666666666667, 0.25760415165056944)

Process finished with exit code 0

第6步，自动超参数调优
Progress: 100.0% Trials:   20 Best score:  0.353427 ETA:   0h 0m 0s
Training again with best arguments
Read 0M words
Number of words:  8952
Number of labels: 735
Progress: 100.0% words/sec/thread:    5917 lr:  0.000000 avg.loss:  5.367946 ETA:   0h 0m 0s
(('__label__baking',), array([0.85826552]))
(('__label__knives',), array([0.36466342]))
(3000, 0.5946666666666667, 0.2571716880495892)

第7步，实际生产中多标签多分类问题的损失计算方式
Read 0M words
Number of words:  8952
Number of labels: 735
Progress: 100.0% words/sec/thread:   30447 lr:  0.000000 avg.loss:  6.348834 ETA:   0h 0m 0s
(('__label__baking', '__label__equipment', '__label__bananas'), array([1.00001001, 0.89626139, 0.71224219]))
((), array([], dtype=float64))
(3000, 0.602, 0.260343087790111)

"""
