from bs4 import BeautifulSoup
import requests
import warnings
import urllib3
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import json
from selenium.webdriver.edge.service import Service
import json

# Fixed footer appended to every scraped article body: source attribution,
# advising teacher, and editor credits (content is intentionally in Chinese).
end = '''———————————————————————————————————————————————————————————————————————————

转自：环球网

指导教师：杨欣笛

编者：张博涵'''


def get_titles_and_url(response):
    """Extract candidate article links and their titles from a huanqiu.com page.

    Args:
        response: an HTTP response object whose ``.text`` attribute holds the
            page HTML.

    Returns:
        dict mapping href -> anchor text for every ``<a href=...>`` whose href
        contains "/article/" or "2025".
    """
    soup = BeautifulSoup(response.text, "html.parser")
    links_title = {}
    for a_tag in soup.find_all('a', href=True):
        href = a_tag['href']
        # BUG FIX: the original condition `"/article/" or "2025" in href` was
        # always truthy (a non-empty string literal), so *every* link was
        # collected.  Each substring must be tested against href explicitly.
        if "/article/" in href or "2025" in href:
            links_title[href] = a_tag.get_text()
    print(links_title)
    return links_title


def get_response_from_url(url):
    """Fetch *url* with a browser-like User-Agent, normalizing partial URLs.

    Normalization, checked most-specific first:
      * "//host/path" (protocol-relative) -> "https://host/path"
      * "/path" (site-relative)           -> "https://www.huanqiu.com/path"
      * anything else without a scheme    -> a fixed fallback article URL

    Returns:
        The ``requests.Response`` object (TLS verification is disabled to
        match the module-wide InsecureRequestWarning suppression).
    """
    fallback = 'https://oversea.huanqiu.com/article/4JCZ89SPWZ'
    if not url.startswith("https://"):
        # BUG FIX: the original used a chain of independent `if` statements
        # that mutated `url` and then fell through into the trailing `else`,
        # so every relative or protocol-relative link was silently replaced
        # by the fallback article; "/path" also became the malformed
        # "https:/path".  An if/elif ladder ordered most-specific-first
        # handles each shape exactly once.
        if url.startswith('//'):
            url = 'https:' + url
        elif url.startswith('/'):
            url = 'https://www.huanqiu.com' + url
        else:
            url = fallback
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/91.0.4472.124 Safari/537.36',
    }
    # verify=False: certificate checks are deliberately skipped for this
    # scrape; the resulting warning is silenced at module level.
    response = requests.get(url, headers=headers, verify=False)
    return response


def get_article_from_url(link_titles):
    """Download each linked article and collect its paragraph text.

    Args:
        link_titles: dict mapping article URL -> article title.

    Returns:
        dict mapping title -> article body with the fixed ``end`` footer
        appended; only articles whose own text is at least 150 characters
        are kept.
    """
    articles_with_titles = {}
    article_num = 0
    for url, title in link_titles.items():
        response = get_response_from_url(url)
        if response.status_code != 200:
            continue
        soup = BeautifulSoup(response.text, "html.parser")
        # str.join builds the body in one pass instead of quadratic `+=`.
        text = ''.join(p.get_text(strip=True) for p in soup.find_all('p'))
        # BUG FIX: the original appended the ~100-character footer *before*
        # the length check, so nearly every page cleared the >=150 threshold.
        # Measure the article text alone, then attach the footer.
        if len(text) >= 150:
            articles_with_titles[title] = (text + end).strip()
            article_num += 1
            print("已有", article_num, "篇文章")
    print("已完成")
    return articles_with_titles


def save_articles_to_file(articles, filename):
    """Serialize *articles* (title -> body) to *filename* as pretty JSON.

    Byte-string values are decoded as UTF-8 first; values that cannot be
    decoded are left as bytes (json.dump then raises TypeError, which is
    caught and reported instead of crashing the caller).

    Args:
        articles: dict of title -> article text (str or bytes).
        filename: destination path; written with UTF-8 encoding.
    """
    def _decode(value):
        # Decode bytes to str when possible; all other types pass through.
        if isinstance(value, bytes):
            try:
                return value.decode('utf-8')
            except UnicodeDecodeError:
                return value
        return value

    processed_dict = {key: _decode(value) for key, value in articles.items()}
    try:
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(processed_dict, f, ensure_ascii=False, indent=4)
    # BUG FIX: the original caught exceptions the *write* path can never
    # raise (json.JSONDecodeError, UnicodeDecodeError) while missing the
    # ones it can: OSError for an unwritable path and TypeError for values
    # json cannot serialize (undecodable bytes kept above).
    except (OSError, TypeError) as e:
        print(f"在保存文章到文件时出现错误: {e}")


def read_and_delete_article(filename):
    """Pop the first (title, article) pair from the JSON file at *filename*.

    The remaining articles are written back to the same file via
    ``save_articles_to_file``.  Returns ``(None, None)`` when the file is
    missing, unreadable, or already empty.
    """
    try:
        with open(filename, 'r+', encoding='utf-8') as file:
            articles = json.load(file)
    except (FileNotFoundError, json.JSONDecodeError, UnicodeDecodeError) as e:
        print(f"在读取和删除文章时出现错误: {e}")
        return None, None
    if not articles:
        print("文件中没有文章可供读取和删除。")
        return None, None
    first_title = next(iter(articles))
    first_article = articles.pop(first_title)
    save_articles_to_file(articles, filename)  # persist the shrunken dict
    return first_title, first_article


def get_articles_from_hq():
    """Scrape the huanqiu.com front page and save qualifying articles to articles.json."""
    response = get_response_from_url("https://www.huanqiu.com/")
    links_titles = get_titles_and_url(response)
    articles_with_titles = get_article_from_url(links_titles)
    save_articles_to_file(articles_with_titles, "articles.json")
    # BUG FIX: the original printed `{x.__len__()}` — a one-element *set*
    # literal — so the count rendered as e.g. "{5}" instead of "5".
    print("现在的文章数量为", len(articles_with_titles))


def check_article_count(filename):
    """Return the number of articles stored in the JSON file *filename*."""
    with open(filename, 'r', encoding='utf-8') as fp:
        return len(json.load(fp))

# Silence the InsecureRequestWarning triggered by requests.get(..., verify=False).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

if __name__ == "__main__":
    # Workflow: log in to the Yiban content platform with Edge/Selenium, then
    # repeatedly pop an article from articles.json and post it via the web form.
    # Run get_articles_from_hq() once beforehand to populate articles.json.
    # get_articles_from_hq()
    articles_filename = 'articles.json'
    # SECURITY NOTE(review): the driver path and the login credentials below
    # are hard-coded; move them to configuration / environment variables.
    service = Service(executable_path=r"D:\webdriver\edgedriver_win641\msedgedriver.exe")
    try:
        driver = webdriver.Edge(service=service)
        driver.get('https://mp.yiban.cn/app/login')
        time.sleep(2)

        # Fill the username / password fields of the login form.
        input_elements = driver.find_elements(By.CSS_SELECTOR, '.mdc-text-field__input')
        if len(input_elements) >= 2:
            input_elements[0].send_keys('18340855498')
            input_elements[1].send_keys('Wsqyxd23!')
        wait = WebDriverWait(driver, 10)
        button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".mdc-button")))
        button.click()

        # Manual pause: press Enter once any captcha / verification step on
        # the login page has been completed by hand.
        input()
        time.sleep(2)

        # Sidebar entry that opens the publishing section.
        link = wait.until(EC.element_to_be_clickable(
            (By.XPATH, "/html/body/div/div[2]/div[1]/aside/div/div[3]/div[1]")))
        for _ in range(51):  # same iteration count as the original p = 0..50 loop
            print("到这了")
            tit, art = read_and_delete_article(articles_filename)
            # ROBUSTNESS FIX: read_and_delete_article returns (None, None)
            # when articles.json is exhausted or unreadable; the original
            # passed None straight into send_keys and crashed.  Stop cleanly.
            if tit is None:
                break
            link.click()
            link_2 = wait.until(EC.element_to_be_clickable(
                (By.XPATH, "/html/body/div/div[2]/div[1]/aside/div/div[3]/div[2]/nav/a[2]")))
            link_2.click()
            link_3 = wait.until(EC.element_to_be_clickable(
                (By.XPATH, "/html/body/div[1]/div[2]/div[2]/div[2]/section/button/div")))
            link_3.click()

            time.sleep(3)
            # Title input field.
            input_element = wait.until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[2]/div[2]/div['
                                                                                 '2]/section/div/div/div[1]/div/input')))
            input_element.send_keys(tit)

            # NOTE(review): the textarea below also receives the *title*;
            # confirm whether this summary field should get `art` instead.
            input_element = wait.until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[2]/div[2]/div['
                                                                                 '2]/section/div/div/div[2]/div/span['
                                                                                 '1]/textarea')))
            input_element.send_keys(tit)

            time.sleep(2)
            # The rich-text editor lives inside an iframe; type the article
            # body into its editable <body>, then switch back out.
            iframe = driver.find_element(By.TAG_NAME, "iframe")
            driver.switch_to.frame(iframe)
            editable_element = wait.until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, "body.view")))
            editable_element.send_keys(art)
            WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.TAG_NAME, "body")))
            driver.switch_to.default_content()

            # Two dropdowns (category / visibility — labels unknown here),
            # then the submit button.
            xiala_elements = driver.find_elements(By.CSS_SELECTOR, '.mdc-select__anchor')
            xiala_elements[3].click()
            link_4 = wait.until(EC.element_to_be_clickable(
                (By.XPATH,
                 "/html/body/div[1]/div[2]/div[2]/div[2]/section/div/div/div[7]/div/div[2]/div[2]/ul/li[2]/span[2]")))
            link_4.click()
            time.sleep(1)
            xiala_elements[5].click()
            time.sleep(1)
            link_5 = wait.until(EC.element_to_be_clickable(
                (By.XPATH,
                 "/html/body/div[1]/div[2]/div[2]/div[2]/section/div/div/div[8]/div/div/div[2]/ul/li[2]/span[2]")))
            link_5.click()

            link_6 = wait.until(EC.element_to_be_clickable(
                (By.XPATH,
                 "/html/body/div[1]/div[2]/div[2]/div[2]/section/div/div/div[9]/button[2]/div")))
            link_6.click()

    finally:
        print("完成 按1结束")
        # BUG FIX: input() returns a *string*, so the original `== 1`
        # comparison was always False and the browser never quit.
        if input().strip() == '1':
            driver.quit()