#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
@author: caijj
@version: V1.0
@file: identification_verification_code.py
@time: 2022/03/01
"""
import base64
import hashlib
import json
import urllib
import cv2
import numpy as np
import requests
from fake_useragent import UserAgent
from jsonpath import jsonpath
from lxml import etree
from skimage.io import imsave
from config.conf import cm
from PIL import Image
from selenium import webdriver
from tools.timeutil import sleep


# import Image


def agent():
    """Build HTTP request headers carrying a randomized User-Agent string."""
    generator = UserAgent(verify_ssl=False)
    return {'User-Agent': generator.random}


def chrome_enter_target_page(url='https://newhouse.cnnbfdc.com/?_tsduk=2'):
    """Launch a maximized Chrome session and navigate to *url* (the target page).

    Returns the live WebDriver; the caller is responsible for quitting it.
    """
    options = webdriver.ChromeOptions()
    # Headless mode intentionally left disabled so the page renders
    # like a normal interactive browser session.
    # options.add_argument('--headless')
    # options.add_argument('--disable-gpu')
    driver = webdriver.Chrome(options=options)
    driver.maximize_window()
    driver.get(url)
    sleep(1)
    return driver


def quit(web_driver):  # NOTE: shadows the builtin quit(); name kept for existing callers
    """Shut down the given WebDriver session if one exists, then pause briefly."""
    if web_driver is None:
        return
    web_driver.quit()
    sleep(5)


def get_img_url(web_driver):
    """Extract the ranking-list image URL from the currently loaded page.

    Clicks through the manual-redirect link when the page shows one, then
    pulls the third <img>/@src inside the ranking-list container.

    :param web_driver: live selenium WebDriver positioned on the target page
    :return: the image URL string, or None on failure (the driver is quit)
    """
    redirect_xpath = "//a[contains(text(),'如果没有跳转，请手动点击这里')]"
    try:
        tree = etree.HTML(web_driver.page_source)
        if len(tree.xpath(redirect_xpath)) == 1:
            web_driver.find_element_by_xpath(redirect_xpath).click()
        # Re-parse: the click may have navigated to a new document.
        tree = etree.HTML(web_driver.page_source)
        return tree.xpath("//div[@id='cnnbfdc-ranking-list']//img/@src")[2]
    except Exception as e:
        # Original code swallowed the error silently; log it, release the
        # browser, and make the None return explicit for callers.
        print('get_img_url failed:', e)
        quit(web_driver)
        return None


def download_img(img_url, img_path, timeout=30):
    """Download *img_url* and save the body to *img_path* (best effort).

    :param img_url: HTTP(S) URL of the image
    :param img_path: local path the image bytes are written to
    :param timeout: seconds before the request is abandoned (prevents hangs)
    """
    headers = agent()  # randomized User-Agent so the request looks like a browser
    try:
        pic = requests.get(img_url, headers=headers, timeout=timeout)
        # Fail loudly instead of silently persisting an HTML error page.
        pic.raise_for_status()
        with open(img_path, 'wb') as f:
            f.write(pic.content)
    except Exception as e:
        # Best-effort contract: report and continue, matching original behavior.
        print(Exception, ':', e)


def convert_image(img_url, img_path=None, standard=127.5):
    """Fetch (if needed) an image and flip every pure-black pixel to white.

    Backward compatible with both call styles present in this file:
      convert_image(url, local_path)  -- download url to local_path first
      convert_image(local_path)       -- process an already-downloaded file
    (The original two-parameter signature crashed the one-argument callers.)

    :param img_url: image URL, or a local file path when *img_path* is omitted
    :param img_path: destination/local file path; defaults to *img_url*
    :param standard: unused legacy threshold, kept for signature compatibility
    """
    if img_path is None:
        # Single-argument call: the first argument is already a local path.
        img_path = img_url
    else:
        download_img(img_url, img_path)
    img = cv2.imread(img_path)  # BGR uint8 array, or None on read failure
    if img is None:
        raise FileNotFoundError('cannot read image: %s' % img_path)
    cleaned = img.copy()
    # Replace pure-black pixels ([0, 0, 0]) with white in one vectorized pass
    # instead of the original O(rows*cols) Python double loop. (Local name no
    # longer shadows the PIL.Image import.)
    black_mask = np.all(img == 0, axis=-1)
    cleaned[black_mask] = [255, 255, 255]
    imsave(img_path, cleaned)


def get_data(img_url, path=None):
    """Run Baidu OCR over a captcha image and return the raw OCR response.

    Backward compatible with both call styles used in this file:
      get_data(url, local_path)  -- download the image first
      get_data(local_path)       -- OCR an already-downloaded file
    (The original two-parameter signature crashed the one-argument caller
    in __main__.)

    :param img_url: image URL, or a local file path when *path* is omitted
    :param path: local file path; defaults to *img_url*
    :return: decoded JSON dict from the Baidu accurate_basic endpoint
    :raises KeyError/IndexError: when the response has no recognized words
    """
    if path is None:
        path = img_url
    else:
        download_img(img_url, path)
    convert_image(path)  # strip pure-black pixels so the OCR sees clean glyphs
    # SECURITY NOTE: credentials are hard-coded; move them to config/environment.
    token_url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={" \
                "client_id}&client_secret={client_secret}".format(client_id='Udeqhqc8QRDnj5R537V1Pxzd',
                                                                  client_secret='LkxLjA3q9Mx1MpfFE4NdfYPjRNY34wq3')
    token = json.loads(str(requests.get(token_url, timeout=30).text))['access_token']

    ocr_url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic?access_token=' + token
    ocr_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    with open(path, 'rb') as f:  # context manager: no leaked file handle
        image_bytes = f.read()
    payload = urllib.parse.urlencode({'image': base64.b64encode(image_bytes)})
    ocr_res = requests.post(url=ocr_url, data=payload, headers=ocr_headers, timeout=30)

    res = json.loads(str(ocr_res.text))
    # Sanity check that at least one recognized word came back; raises the
    # same KeyError/IndexError the original did on empty/err responses.
    _ = res['words_result'][0]['words']
    return res


def aliyun_get_data(img_url, img_path):
    """Run Aliyun advanced OCR over a captcha image and return recognized words.

    :param img_url: HTTP(S) URL of the captcha image
    :param img_path: local path the image is saved to before OCR
    :return: list of recognized word strings, or None on any failure
    """
    download_img(img_url, img_path)
    convert_image(img_path)
    url = "https://ocrapi-advanced.taobao.com/ocrservice/advanced"
    with open(img_path, 'rb') as f:  # read the processed image as binary
        encode_str = str(base64.b64encode(f.read()), 'utf-8')
    # SECURITY NOTE: the APPCODE is hard-coded; move it to config/environment.
    headers = {
        'Authorization': 'APPCODE bab7dfed269f4d7ab6b2bde8f1b9dc40',
        'Content-Type': 'application/json; charset=UTF-8'
    }
    try:
        params = json.dumps({'img': encode_str}).encode(encoding='UTF8')
        # verify=False kept from the original (was used with a local debug proxy);
        # NOTE(review): this disables TLS verification -- confirm it is intended.
        res = requests.post(url=url, data=params, headers=headers,
                            verify=False, timeout=30)
        matches = jsonpath(res.json(), '$.prism_wordsInfo')
        if not matches:
            # jsonpath returns False when nothing matched; the original crashed
            # into the broad except here -- fail explicitly instead.
            print('aliyun_get_data: no prism_wordsInfo in response')
            return None
        return [entry['word'] for entry in matches[0]]
    except Exception as e:
        print(e)


if __name__ == '__main__':
    # Resolve the temp-file location for the captcha image via project config.
    path = cm.tmp_file_path('nb.png')
    # driver = chrome_enter_target_page()
    # url = get_img_url(driver)
    # download_img(url, img_path)
    # NOTE(review): get_data is declared as get_data(img_url, path) but called
    # here with a single argument -- this raises TypeError unless the second
    # parameter is optional; confirm the intended call signature.
    get_data(path)
