import time
from time import sleep

import requests

#白小纯帅照下载
# url='https://b0.bdstatic.com/69b466b277854a0a1c373e9f36ec16a0.jpg@h_1280'
# response = requests.get(url)
# #以写文件的方式将图片写入文件
# with open("D:/python代码/PythonProject1/flask_quiz/static/image/2.jpg", "wb") as f:
#     f.write(response.content)

# from bs4 import BeautifulSoup
# from urllib3 import response
# import lxml
#
# # 定义目标 URL
# url = "https://m.maoyan.com/asgard/movie/1490977"
#
# # 设置请求头，伪装为浏览器访问
# headers = {
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
# }
#
# # 发送请求
# response = requests.get(url, headers=headers)
#
# # 检查状态码
# if response.status_code == 200:
#     print("请求成功！")
#     print(response.text[:500])  # 打印部分网页内容
# else:
#     print(f"请求失败，状态码: {response.status_code}")
#
# # 使用 BeautifulSoup 解析 HTML
# soup = BeautifulSoup(response.text, "lxml")
#
# links = soup.find_all('span')
# for link in links:
#     bid = link.get('data-view-bid')
#     text = link.text
#     print(f"bid: {bid}")
#     print(f"text: {text}")
# # 提取网页标题
# title = soup.title.string
# print(f"网页标题: {title}")

# from bs4 import BeautifulSoup
# import requests
#
# # 指定你想要获取标题的网站
# url = 'https://cn.bing.com/'  # 抓取bing搜索引擎的网页内容
#
# # 发送HTTP请求获取网页内容
# response = requests.get(url)
# # 中文乱码问题
# response.encoding = 'utf-8'
# # 确保请求成功
# if response.status_code == 200:
#     # 使用BeautifulSoup解析网页内容
#     soup = BeautifulSoup(response.text, 'lxml')
#
#     # 查找<title>标签
#     title_tag = soup.find('title')
#
#     # 打印标题文本
#     if title_tag:
#         print(title_tag.get_text())
#     else:
#         print("未找到<title>标签")
# else:
#     print("请求失败，状态码：", response.status_code)

# import requests
# url = 'https://cn.bing.com/'
# response = requests.get(url)
#
# # 使用 chardet 自动检测编码
# import chardet
# encoding = chardet.detect(response.content)['encoding']
# print(encoding)
# response.encoding = encoding


# from bs4 import BeautifulSoup
# import requests
#
# # 指定你想要获取标题的网站
# url = 'https://www.baidu.com/' # 抓取百度首页的网页内容
#
# # 发送HTTP请求获取网页内容
# response = requests.get(url)
# # 中文乱码问题
# response.encoding = 'utf-8'
#
# soup = BeautifulSoup(response.text, 'lxml')
#
# # 查找第一个 <a> 标签
# first_link = soup.find('a')
# print(first_link)
# print("----------------------------")
#
# # 获取第一个 <a> 标签的 href 属性
# first_link_url = first_link.get('href')
# print(first_link_url)
# print("----------------------------")
#
# # 查找所有 <a> 标签
# all_links = soup.find_all('a')
# print(all_links)



# from bs4 import BeautifulSoup
# import requests
#
# # 指定你想要获取标题的网站
# url = 'https://www.baidu.com/' # 抓取百度首页的网页内容
#
# # 发送HTTP请求获取网页内容
# response = requests.get(url)
# # 中文乱码问题
# response.encoding = 'utf-8'
#
# soup = BeautifulSoup(response.text, 'lxml')
#
# # 获取第一个 <p> 标签中的文本内容
# paragraph_text = soup.find('p').get_text()
#
# # 获取页面中所有文本内容
# all_text = soup.get_text()
# print(all_text)


# from bs4 import BeautifulSoup
# import requests
#
# # 指定你想要获取标题的网站
# url = 'https://www.baidu.com/' # 抓取百度首页的网页内容
#
# # 发送HTTP请求获取网页内容
# response = requests.get(url)
# # 中文乱码问题
# response.encoding = 'utf-8'
#
# soup = BeautifulSoup(response.text, 'lxml')
#
# # 查找具有 id="unique-id" 的 <input> 标签
# unique_input = soup.find('input', id='su')
#
# input_value = unique_input['value'] # 获取 input 输入框的值
#
# print(input_value)


import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
import random
import lxml
import pandas as pd
import csv
import time

# Landing page: Eastmoney "well-known US stocks" grid. The table is built by
# JavaScript, so a real browser (Selenium) is used instead of requests.
url = "https://quote.eastmoney.com/center/gridlist.html#us_wellknown"


# Launch a local Edge browser and grab the fully rendered page source.
driver = webdriver.Edge()
driver.get(url)
html = driver.page_source
# print(html)
# Parse the rendered HTML and collect the text of every table-row cell.
soup = BeautifulSoup(html, 'lxml')
trs = soup.find_all('tr')
data = []
for tr in trs[1:]:  # trs[0] is assumed to be the header row — TODO confirm
    tds = tr.find_all(['th','td'])
    s = [td.get_text(strip=True) for td in tds]
    data.append(s)
df = pd.DataFrame(data)
print(df)

# First page is written twice: once via pandas (1.csv, includes the index
# column) and once via the csv module (2.csv, append mode — the file is never
# truncated between runs, so repeated runs accumulate rows).
df.to_csv('1.csv')
with open('2.csv', mode='a', newline='', encoding='utf-8') as f:
    write = csv.writer(f)
    write.writerows(data)

time.sleep(3)

# Click the last <a> in the pager — presumably the "next page" link — so the
# loop below starts on page 2. TODO confirm the XPath against the live page.
menu_xpath = '//*[@id="mainc"]/div/div[2]/div/a[last()]'
menu = driver.find_element(By.XPATH, menu_xpath)
menu.click()

time.sleep(3)


def save(data, filename='2.csv', exists=False):
    """Write rows of scraped data to a CSV file.

    Args:
        data: Iterable of rows (each a list of cell strings) to write.
        filename: Target CSV path; defaults to '2.csv'.
        exists: When True, overwrite the file (mode 'w'); when False
            (default), append to it (mode 'a').
    """
    # The two original branches differed only in the open() mode, so pick
    # the mode up front and keep a single write path.
    mode = 'w' if exists else 'a'
    # newline='' stops the csv module from emitting blank lines on Windows.
    with open(filename, mode=mode, newline='', encoding='utf-8') as f:
        csv.writer(f).writerows(data)

# Paginate through the stock table: scrape whatever page is currently shown,
# append it to 2.csv, then click the "next page" link; stop when the click
# fails (assumed to mean the last page was reached).
num = 1  # 1-based counter of pages processed by this loop
while True:
    try:
        print(f"正在处理第 {num} 页...")
        # Fixed delay so the JS table finishes rendering before we read it.
        sleep(3)
        html = driver.page_source
        soup = BeautifulSoup(html, 'lxml')
        trs = soup.find_all('tr')

        # Skip trs[0] (assumed header row) and flatten each row's cell text.
        data = []
        for tr in trs[1:]:
            tds = tr.find_all(['th', 'td'])
            s = [td.get_text(strip=True) for td in tds]
            data.append(s)
        save(data)  # default exists=False → appends to 2.csv
        print(f"第 {num} 页保存成功，共 {len(data)} 条记录")
        try:
            sleep(3)
            # Last <a> in the pager is presumably "next page" — TODO confirm.
            menu_xpath = '//*[@id="mainc"]/div/div[2]/div/a[last()]'
            menu = driver.find_element(By.XPATH, menu_xpath)
            menu.click()
            print(f"已点击下一页，等待页面加载...")
            sleep(3)
            num += 1
        except Exception as e:
            # No clickable next-page element: treat as end of pagination.
            print(f"未找到下一页按钮或点击失败: {e}")
            print("爬取完成，共获取", num, "页数据")
            break
    except Exception as e:
        # Any other failure (stale driver, parse error) aborts the crawl.
        print(f"处理页面时出错: {e}")
        break

# page_num = 1
#
# while True:
#     try:
#         print(f"正在处理第 {page_num} 页...")
#
#         # 等待表格加载完成
#         WebDriverWait(driver, 10).until(
#             EC.presence_of_element_located((By.XPATH, '//table/tbody/tr'))
#         )
#
#         # 获取当前页面数据
#         html = driver.page_source
#         soup = BeautifulSoup(html, 'lxml')
#         trs = soup.select('table.m-table tbody tr')
#
#         page_data = []
#         for tr in trs:
#             tds = tr.find_all(['td', 'th'])
#             row = [td.get_text(strip=True) for td in tds]
#             if row:  # 过滤空行
#                 page_data.append(row)
#
#         # 保存当前页数据
#         save_to_csv(page_data)
#         print(f"第 {page_num} 页保存成功，共 {len(page_data)} 条记录")
#
#         # 查找下一页按钮并点击
#         try:
#             next_btn = WebDriverWait(driver, 5).until(
#                 EC.element_to_be_clickable((By.XPATH, '//a[contains(text(), "下一页")]'))
#             )
#             next_btn.click()
#             print(f"已点击下一页，等待页面加载...")
#
#             # 随机延迟，避免操作过快
#             time.sleep(random.uniform(2, 4))
#
#             # 等待页面更新
#             WebDriverWait(driver, 10).until(
#                 lambda d: len(d.find_elements(By.XPATH, '//table/tbody/tr')) > 0
#             )
#
#             page_num += 1
#
#         except Exception as e:
#             print(f"未找到下一页按钮或点击失败: {e}")
#             print("爬取完成，共获取", page_num, "页数据")
#             break
#
#     except Exception as e:
#         print(f"处理页面时出错: {e}")
#         break




# 定义保存CSV的函数
# def save_to_csv(data, filename='stocks.csv', is_header=False):
#     df = pd.DataFrame(data)
#     if is_header:
#         df.to_csv(filename, index=False, mode='w', encoding='utf-8-sig')
#     else:
#         df.to_csv(filename, index=False, mode='a', header=False, encoding='utf-8-sig')


# from bs4 import BeautifulSoup
# from selenium import webdriver
# import pandas as pd
# import csv
#
# # 初始化浏览器
# driver = webdriver.Edge()
# driver.get(url)
# html = driver.page_source
# driver.quit()  # 关闭浏览器
#
# # 解析HTML
# soup = BeautifulSoup(html, 'lxml')
#
# # 提取表头
# thead = soup.find('thead')
# if thead:
#     header_row = thead.find('tr')
#     headers = [th.get_text(strip=True) for th in header_row.find_all('th')]
# else:
#     # 如果没有thead，尝试从第一行提取表头
#     first_row = soup.find('tr')
#     headers = [th.get_text(strip=True) for th in first_row.find_all(['th', 'td'])]
#
# # 提取数据行
# data_rows = []
# for row in soup.find_all('tr')[1:]:  # 跳过第一行（已作为表头）
#     cells = row.find_all(['td', 'th'])
#     row_data = [cell.get_text(strip=True) for cell in cells]
#     data_rows.append(row_data)
#
# # 创建DataFrame
# df = pd.DataFrame(data_rows, columns=headers)
#
# # 打印数据验证
# print(df.head())
#
# # 写入CSV文件
# df.to_csv('output.csv', index=False, encoding='utf-8-sig')
#
# # 或者使用csv模块手动写入
# with open('output_manual.csv', 'w', newline='', encoding='utf-8-sig') as f:
#     writer = csv.writer(f)
#     writer.writerows(data_rows)  # 写入数据

driver.close()

