# encoding=utf8
import warnings

import numpy as np
from scipy.misc import derivative

warnings.filterwarnings("ignore")


def loss(theta):
    """Grader's objective: the linear function 2 * (theta - 3).

    Its only zero is at theta == 3, which is the optimum the exercise
    expects gradient descent to find.
    """
    offset = theta - 3
    return 2 * offset


def gradient_descent(initial_theta, eta=0.05, n_iters=1000, epslion=1e-8, f=None):
    '''
    Gradient descent (梯度下降).

    Numerically differentiates the objective with a central difference of
    step 1.0 — the same scheme and defaults as ``scipy.misc.derivative``
    (dx=1.0, order=3), which was removed in SciPy 1.12, so the formula is
    inlined here instead of imported.  Iterates until ``|f(theta)|`` is
    within the tolerance or the iteration budget runs out.

    :param initial_theta: initial parameter value, float
    :param eta: learning rate, float
    :param n_iters: number of training iterations, int
    :param epslion: tolerance on |f(theta)|, float.  Name keeps the
                    original (misspelled) spelling for caller compatibility.
    :param f: objective function of one float; defaults to the
              module-level ``loss`` when None (backward compatible).
    :return: the trained parameter theta
    '''
    if f is None:
        f = loss  # original behavior: differentiate the module-level loss
    theta = initial_theta
    for _ in range(n_iters):
        # Central difference (f(x+h) - f(x-h)) / (2h) with h = 1.0,
        # matching scipy.misc.derivative's default weights exactly.
        d = (f(theta + 1.0) - f(theta - 1.0)) / 2.0
        # NOTE(review): the original adds the derivative (ascent direction);
        # preserved as-is because the grader's objective is itself the
        # gradient of the true loss, so this walks toward its zero.
        theta += d * eta
        if abs(f(theta)) <= epslion:
            break
    return theta


# Grade the learner's submission: read their answer file, reject obvious
# cheating (hard-coding the success message, or opening files to peek at
# the grader), then verify gradient descent converges to theta = 3.0.
# Raw string: the path contains a backslash that must stay literal.
with open(r'逻辑回归\第3关：梯度下降.py', encoding="utf8") as f:
    code = f.read()

# Flag submissions that open files directly (e.g. to read the grader).
has_open_file = 'open' in code

# Flag submissions that hard-code the expected output.  The marker is
# checked character-by-character so this detector does not itself contain
# the full answer string.
hash_name = ['误', '差', '小', '于', '0', '.', '1']
has_print_answer = all(ch in code for ch in hash_name)

# NOTE(review): the original script reset both flags to False right after
# computing them, which made the entire anti-cheat check dead code; the
# resets have been removed so the detectors actually take effect.
if has_print_answer:
    print('你可能正在试图作弊，请不要这样做')
elif has_open_file:
    print('你正在试图打开文件，请不要这样做')
else:
    theta = gradient_descent(initial_theta=0)
    if abs(theta - 3.0) < 1e-4:
        print('误差小于0.0001')
    else:
        print('很遗憾，最优解与答案的误差较大，你的答案是：', theta,
              ',误差为:', abs(theta-3.0), '正确答案为：3.0')
