import os
from selenium import webdriver
import time
import requests
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Baidu general-translate API; dictionary / TTS resources are NOT included —
# contact translate_api@baidu.com for those.
# coding=utf-8  (NOTE: a coding declaration only takes effect on line 1 or 2 of a file)
import http.client
import hashlib
import urllib
import random
import json
import re
import asyncio
from PIL import Image
from mylogger import Mylogger
from ossutill import OssUtil
from wxcloudrun.views import sendToWxgzh, get_minimum_color, calculate_color_percentages, get_max_color_triplet,upLoadImage,sendPictureArticle
import poimage

log = Mylogger()
oss = OssUtil('wx-gzh-szl', 'LTAI5tN5LyYbmZVitZCVtAyy', 'lRe85lftnHFN8moj5ydwIWyaKJyofK')
async def flip_image(file_path, flip_method):
    # 打开图片文件
    with open(file_path, "rb") as f:
        log.printLog('打开图片文件')
        # 打开图片
        image = Image.open(file_path)
        # 翻转图片
        image_flipped = image.transpose(method=flip_method)
        # 关闭原始图片
        image.close()
        tmp_file_path = file_path.replace('mydata', 'mydata_tmp')
        # 保存翻转后的图片
        image_flipped.save(tmp_file_path)
        # 关闭翻转后的图片
        image_flipped.close()
        # 将翻转后的图片数据写入原始图片文件并关闭文件
        with open(tmp_file_path, "rb") as f:
            data = f.read()
        with open(file_path, "wb") as f:
            f.write(data)
    # oss.upload_file_to_oss(file_path)
# Translate text to English via the Baidu general-translate API.
def translateChineseToEnglish(q):
    # 手动录入翻译内容
    sign = appid + q + str(salt) + secretKey
    sign = hashlib.md5(sign.encode()).hexdigest()
    myurl = myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(salt) + '&sign=' + sign

    # 建立会话，返回结果
    httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
    httpClient.request('GET', myurl)
    # response是HTTPResponse对象
    response = httpClient.getresponse()
    result_all = response.read().decode("utf-8")
    try:
        time.sleep(1)
        a = json.loads(result_all)
        return re.sub(r'\(.*?\)|（.*?）', '', a['trans_result'][0]['dst'])
    except Exception as e:
        log.printLog("【" + article_link + "】")
        log.printLog(a)
        log.printLog(e)
        return None;

# 模拟向下滚动，加载页面
def pageScroll(driver):
    SCROLL_PAUSE_TIME = 1.0  # 滚动的间隔时间
    last_height = driver.execute_script('return document.body.scrollHeight')
    while True:
        # 模拟向下滚动到页面底部
        driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')

        # 等待页面加载
        time.sleep(SCROLL_PAUSE_TIME)

        # 计算新的滚动高度并检查是否已经到达页面底部
        new_height = driver.execute_script('return document.body.scrollHeight')
        if new_height == last_height:
            break
        last_height = new_height

# 删除文件
def removeFile(file_path):
    try:
        os.remove(file_path)
        log.printLog(img_name + '删除成功！')
    except FileNotFoundError:
        log.printLog(f"文件{file_path}不存在。")
    except PermissionError:
        log.printLog(f"没有权限删除文件{file_path}。")
    except OSError as e:
        log.printLog(f"删除文件{file_path}时发生错误：{str(e)}")

def main1():
    # 百度appid和密钥需要通过注册百度【翻译开放平台】账号后获得
    appid = '20230701001730597'        # 填写你的appid
    secretKey = '1UXhQelh_QrlnaWnR3TC'    # 填写你的密钥
    httpClient = None
    myurl = '/api/trans/vip/translate'  # 通用翻译API HTTP地址
    fromLang = 'auto'       # 原文语种
    toLang = 'en'           # 译文语种
    salt = random.randint(32768, 65536)

    # 初始化 Chrome 浏览器
    options = webdriver.ChromeOptions()
    # 添加UA
    options.add_argument('user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"')

    # 谷歌文档提到需要加上这个属性来规避bug
    options.add_argument('--disable-gpu')

    # 隐藏滚动条, 应对一些特殊页面
    # options.add_argument('--hide-scrollbars')

    # 不加载图片, 提升速度
    options.add_argument('blink-settings=imagesEnabled=false') 

    # 浏览器不提供可视化页面. linux下如果系统不支持可视化不加这条会启动失败
    options.add_argument('--headless') 

    # 以最高权限运行
    # options.add_argument('--no-sandbox')
    
    # 禁用JavaScript
    # option.add_argument("--disable-javascript") 

    # 设置开发者模式启动，该模式下webdriver属性为正常值
    options.add_experimental_option('excludeSwitches', ['enable-automation']) 

    driver = webdriver.Chrome(options=options)
    # 获取当前工作目录
    current_dir = os.getcwd()
    # 打开文件，按行读取文件
    with open(os.path.join( current_dir ,'name.txt'), 'r', encoding='utf-8') as f:
        names = f.readlines()
    with open(os.path.join( current_dir ,'result.txt'), 'r', encoding='utf-8') as f:
        results = f.readlines()
    for n in names:
        name=n.replace('\n','')
        # 手动录入翻译内容，q存放
        en_name_tmp = translateChineseToEnglish(name)
        if en_name_tmp == '' or en_name_tmp == None:
            continue
        en_name = ''.join([x[0].upper() for x in en_name_tmp.split()])
        # 搜索主编
        search_url = 'https://www.toutiao.com/search/?keyword={}&pd=user&source=search_subtab_switch&action_type=search_subtab_switch&page_num=0&search_id=&from=media&cur_tab_title=media'.format(name)
        driver.get(search_url)
        time.sleep(3)
        p_pic = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[1]/div/div/div/div[1]/a')
        p_pic.click()
        # 等待新窗口出现
        WebDriverWait(driver, 15).until(EC.number_of_windows_to_be(2))
        # 获取所有窗口的句柄
        window_handles = driver.window_handles
        # 切换到新窗口
        driver.switch_to.window(window_handles[1])
        # b_pic = driver.find_element(By.ID, 'verify-bar-close')
        # b_pic.click()
        # 获取新窗口的URL
        time.sleep(4)
        r_pic = driver.find_element(By.CSS_SELECTOR, '#root > div > div.main-wrapper > div.main-l > div > div.profile-tabs-nav > ul > li:nth-child(2)')
        r_pic.click()
        # 等待主编个人主页加载完成
        time.sleep(2)
        article_pic = driver.find_element(By.XPATH, '/html/body/div[1]/div/div[3]/div[1]/div/div[1]/ul/li[2]')
        article_pic.click()

        # 滚动到页面底部 加载全部数据
        time.sleep(5)
        # 模拟向下滚动页面
        pageScroll(driver)

        # 获取所有文章的链接列表
        article_links = []
        soup = BeautifulSoup(driver.page_source, 'html.parser')

        # 获取<div>标签下的所有<a>标签
        div_list = soup.find_all('div', attrs={'class': 'profile-article-card-wrapper'})
        for div in div_list:
            a_list = div.find_all('a', attrs={'class': 'title'})
            for a in a_list:
                href = a.get('href')
                article_links.append(href) 
                print(href)

        # 遍历文章
        for article_link in article_links:
            # 进入文章页面
            driver.get(article_link)

            # 获取文章页面源码
            soup = BeautifulSoup(driver.page_source, 'html.parser')

            # 获取文章标题并翻译成英文
            article_title = soup.select_one('h1').text

            # 建立会话，返回结果
            try:
                en_title = translateChineseToEnglish(article_title)
                if 'LD' == en_title:
                    log.printLog(f"暂停。。。")
                elif ''== en_title or None == en_title:
                    continue
                # 生成文件夹名
                # 获取用户home目录
                # home = '/Volumes/娱乐/'
                home = current_dir
                folder_name = ''.join([x[0].upper() for x in en_title.split()])
                if en_name+'---'+folder_name+'\n' in results:
                    continue
                # 创建文件夹
                folder_path = os.path.join(home, "mydata", en_name, folder_name )
                os.makedirs(folder_path, exist_ok=True)
                # 下载图片
                img_tags = soup.select('.pgc-img img')
                #封装素材相关
                flag = False
                # JSON 格式的字符串
                json_str = '{}'
                json_data_str = '{}'
                # 解析 JSON 字符串为 Python 对象
                article = json.loads(json_str)
                json_data = json.loads(json_data_str)
                article["title"] = folder_name
                images_path = []
                articles=[]
                for i, img_tag in enumerate(img_tags):
                    img_url = img_tag['src']
                    img_extension = img_tag['mime_type'].split('/')[1]
                    if img_extension=='webp':
                        img_extension='jpg'
                    img_name = '{}-{}.{}'.format(i, folder_name, img_extension)
                    file_path_0 = os.path.join(home, "mydata", en_name, folder_name, '0-'+img_name)
                    file_path_1 = os.path.join(home, "mydata", en_name, folder_name, '1-'+img_name)
                    img_extension_txt = 'txt'
                    img_name_txt = '{}-{}.{}'.format(i, folder_name, img_extension_txt)
                    file_path_txt = os.path.join(home, "mydata", en_name, folder_name, '0-' + img_name_txt)
                    # 判断文件是否存在
                    if os.path.exists(file_path_txt) or os.path.exists(os.path.join(home, "mydata", en_name, folder_name, '1-'+img_name_txt)):
                        print("this file was exist")
                        with open(file_path_txt, 'r', encoding='utf-8') as f:
                            images_path.append(f.readline())
                    else:
                        with open(file_path_0, 'wb') as f:
                            img_response = requests.get(img_url)
                            time.sleep(1)
                            f.write(img_response.content)
                            time.sleep(1)
                            # 判断文件大小是否小于250KB或大于10Mb
                            if os.path.getsize(file_path_0) < 250*1024 or os.path.getsize(file_path_0) > 10*1024*1024:
                                log.printLog('文件大小小于250KB或大于10Mb'  )
                            elif flag==False:
                                poimage.del_watermark(input_image=file_path_0,output_image=file_path_1)
                                article["cover_image_path"] = file_path_1
                                flag = True
                            else:
                                poimage.del_watermark(input_image=file_path_0,output_image=file_path_1)
                                minimum_color = get_minimum_color(file_path_1)
                                color_percentages = calculate_color_percentages(file_path_1)
                                max_colors = get_max_color_triplet(color_percentages)
                                tmp_url = upLoadImage(file_path_1).get("url")+'|'+minimum_color+'|'+max_colors
                                images_path.append(tmp_url)
                                with open(file_path_txt, 'a') as f:
                                    f.write(tmp_url)
                                removeFile(file_path_1)

                article["images_path"] = images_path
                articles.append(article)
                json_data["articles"]=articles
                sendToWxgzh(json_data)
                log.printLog(f"{folder_name}草稿创建成功。")
                # 记录已经存储图片文件夹
                with open('./result.txt', 'a') as f:
                    f.write(en_name+'---'+folder_name+'\n')  # 写入新文件，注意行末要加上换行符
            except Exception as e:
                log.printLog(file_path_1)
                log.printLog(e)
            finally:
                if httpClient:
                    httpClient.close()
    # 关闭浏览器
    driver.quit()
    


async def main():
    # 封面
    file_path_0="C:\\Users\\EDZ\\Desktop\\t-1.jpeg"
    # 插图
    file_path_1="C:\\Users\\EDZ\\Desktop\\t-1.jpeg"
    # 内容
    content='一个骨瘦如柴的儿童：这个儿童躺在地上，身体瘦弱，看起来非常虚弱。他的衣服破旧，面容憔悴，显然是长时间处于贫困和不良的健康状况。一只秃鹰：这只秃鹰站在儿童附近，头颈裸露，双翼合拢，身体直立。它似乎在等待儿童死亡，以便啄食其尸体。背景：图片的背景是一个宽广的平原或荒野，没有建筑物或人类活动的迹象。天空是晴朗的，没有云层，显得格外宁静和冷酷。'

    # JSON 格式的字符串
    json_str = '{}'
    json_data_str = '{}'
    # 解析 JSON 字符串为 Python 对象
    article = json.loads(json_str)
    json_data = json.loads(json_data_str)
    article["cover_image_path"] = file_path_0
    images_path = []
    minimum_color = get_minimum_color(file_path_1)
    color_percentages = calculate_color_percentages(file_path_1)
    max_colors = get_max_color_triplet(color_percentages)
    tmp_url = upLoadImage(file_path_1).get("url") + '|' + minimum_color + '|' + max_colors
    images_path.append(tmp_url)
    article["images_path"] = images_path
    articles.append(article)
    json_data["articles"] = articles
    sendPictureArticle(json_data,content)

# 使用 asyncio 运行主函数
asyncio.run(main())