import os
from datetime import datetime

import chardet
import pymysql
import requests
from bs4 import BeautifulSoup

# Configuration
# MySQL connection parameters used by create_db_connection().
MYSQL_CONFIG = {
  'host': 'localhost',
  'user': 'root',
  'password': '123456',
  'database': 'library',
  'charset': 'utf8mb4'
}
# Path of the exported browser-bookmarks HTML file to import.
BOOKMARK_FILE = '/bookmarks.htm'
# Timeout in seconds for each URL accessibility probe.
REQUEST_TIMEOUT = 5
# Browser-like User-Agent sent with probes so servers don't reject them outright.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'


def create_db_connection():
  """Open a MySQL connection and ensure the `bookmarks` table exists.

  Returns:
    An open pymysql connection (DictCursor rows) on success, or None
    when connecting or the table-creation DDL fails.
  """
  try:
    connection = pymysql.connect(**MYSQL_CONFIG, cursorclass=pymysql.cursors.DictCursor)
    # Idempotent DDL: creates the table on first run, no-op afterwards.
    with connection.cursor() as cur:
      cur.execute('''
        CREATE TABLE IF NOT EXISTS bookmarks (
          id INT AUTO_INCREMENT PRIMARY KEY,
          title VARCHAR(255),
          url VARCHAR(512) UNIQUE,
          add_date DATETIME,
          last_visit DATETIME,
          last_modified DATETIME,
          is_accessible TINYINT(1) DEFAULT 0,
          last_check DATETIME
        )
      ''')
      connection.commit()
  except pymysql.Error as e:
    print(f"数据库连接错误: {e}")
    return None
  return connection


def timestamp_to_str(timestamp):
  """Convert a Unix timestamp to a 'YYYY-MM-DD HH:MM:SS' local-time string.

  Args:
    timestamp: Seconds since the epoch as a digits-only string (the form
      BeautifulSoup yields for bookmark attributes) or as an int.
      None / empty / non-numeric values are tolerated.

  Returns:
    The formatted local-time string, or None when the input is missing
    or not a plain non-negative integer.
  """
  # BUG FIX: the original called timestamp.isdigit() directly, which
  # raised AttributeError for any truthy non-string (e.g. an int).
  if timestamp is None:
    return None
  text = str(timestamp).strip()
  if not text.isdigit():  # rejects '', signs, decimals, garbage
    return None
  return datetime.fromtimestamp(int(text)).strftime('%Y-%m-%d %H:%M:%S')


def is_url_accessible(url):
  """Probe *url* with an HTTP request and report whether it responds.

  Args:
    url: Candidate URL; only http:// and https:// schemes are probed.

  Returns:
    True when the server answers with a status < 400; False for other
    schemes, empty/None input, timeouts, connection errors, or error
    responses.
  """
  if not url or not url.startswith(('http://', 'https://')):
    return False

  headers = {'User-Agent': USER_AGENT}
  try:
    response = requests.head(
        url,
        timeout=REQUEST_TIMEOUT,
        headers=headers,
        allow_redirects=True
    )
    # BUG FIX: many servers reject HEAD (405 Method Not Allowed /
    # 501 Not Implemented) even for perfectly reachable pages; retry
    # those with a streamed GET so the body is never downloaded.
    if response.status_code in (405, 501):
      response = requests.get(
          url,
          timeout=REQUEST_TIMEOUT,
          headers=headers,
          allow_redirects=True,
          stream=True
      )
      response.close()
    return response.status_code < 400
  except (requests.RequestException, ValueError):
    return False


def _extract_bookmarks(soup):
  """Turn every <a href=...> tag in *soup* into a bookmark dict."""
  records = []
  for a in soup.find_all('a', href=True):
    url = a['href'].strip()
    records.append({
      'title': a.text.strip(),
      'url': url,
      'add_date': timestamp_to_str(a.get('add_date')),
      'last_visit': timestamp_to_str(a.get('last_visit')),
      'last_modified': timestamp_to_str(a.get('last_modified')),
      'is_accessible': is_url_accessible(url)
    })
  return records


def parse_bookmarks(file_path):
  """Parse an exported bookmarks HTML file into a list of bookmark dicts.

  Tries a list of common encodings first; if every one fails, sniffs
  the encoding from the raw bytes with chardet.

  Args:
    file_path: Path to the bookmarks HTML file.

  Returns:
    List of dicts with title/url/add_date/last_visit/last_modified/
    is_accessible keys; empty list when the fallback parse fails.
  """
  # Common encodings for Chinese-locale browser exports.
  encodings = ['utf-8', 'gbk', 'gb18030', 'iso-8859-1']
  for encoding in encodings:
    try:
      with open(file_path, 'r', encoding=encoding) as f:
        return _extract_bookmarks(BeautifulSoup(f, 'html.parser'))
    except UnicodeDecodeError:
      continue

  # Fallback: detect the encoding from the raw bytes.
  try:
    with open(file_path, 'rb') as f:
      raw = f.read()
    # BUG FIX: the original called f.read() a second time after the
    # chardet read had already exhausted the file, so the fallback
    # always parsed an empty document. Read once, reuse the bytes.
    # chardet may return None for undetectable input — default to utf-8
    # and replace undecodable bytes rather than failing outright.
    encoding = chardet.detect(raw)['encoding'] or 'utf-8'
    text = raw.decode(encoding, errors='replace')
    return _extract_bookmarks(BeautifulSoup(text, 'html.parser'))
  except Exception as e:
    print(f"解析错误: {e}")
    return []


def insert_bookmarks(conn, bookmarks):
  """插入书签到数据库"""
  if not conn: return 0

  # 检查重复和相似URL
  duplicates = []
  similar = []
  urls = [bm['url'] for bm in bookmarks]
  for i, url1 in enumerate(urls):
    for j, url2 in enumerate(urls[i + 1:], i + 1):
      if url1 == url2:
        duplicates.append((bookmarks[i], bookmarks[j]))
      elif url1.startswith(url2) or url2.startswith(url1):
        similar.append((bookmarks[i], bookmarks[j]))

  for dup in duplicates:
    print(f"重复: {dup[0]['url']}\n  URL1: {dup[0]['title']}\n  URL2: {dup[1]['title']}")
  for sim in similar:
    print(f"相似: {sim[0]['url']} 和 {sim[1]['url']}\n  URL1: {sim[0]['title']}\n  URL2: {sim[1]['title']}")

  # 插入数据
  inserted = 0
  skipped = 0
  with conn.cursor() as cursor:
    for bm in bookmarks:
      try:
        cursor.execute('''
          INSERT INTO bookmarks (title, url, add_date, last_visit, last_modified, is_accessible, last_check)
          VALUES (%s, %s, %s, %s, %s, %s, NOW())
        ''', (bm['title'], bm['url'], bm['add_date'], bm['last_visit'], bm['last_modified'], int(bm['is_accessible'])))
        inserted += 1
        status = "可访问" if bm['is_accessible'] else "不可访问"
        print(f"已添加: {bm['title']} ({status})")
      except pymysql.IntegrityError:
        # 更新已有记录的访问状态
        cursor.execute('''
          UPDATE bookmarks 
          SET is_accessible = %s, last_check = NOW()
          WHERE url = %s
        ''', (int(bm['is_accessible']), bm['url']))
        conn.commit()
        skipped += 1
        status = "可访问" if bm['is_accessible'] else "不可访问"
        print(f"已更新: {bm['title']} ({status})")
      except pymysql.Error as e:
        print(f"插入错误: {e}")
        conn.rollback()
  conn.commit()
  return inserted, skipped


if __name__ == '__main__':
  # Guard: bail out early when the bookmarks export is missing.
  if not os.path.exists(BOOKMARK_FILE):
    print(f"错误: 文件不存在 - {BOOKMARK_FILE}")
    exit(1)

  print("\n连接数据库...")
  db_conn = create_db_connection()
  if db_conn is None:
    exit(2)

  print("\n解析书签文件...")
  parsed_bookmarks = parse_bookmarks(BOOKMARK_FILE)
  print(f"找到 {len(parsed_bookmarks)} 个书签")

  print("\n写入数据库...")
  added_count, updated_count = insert_bookmarks(db_conn, parsed_bookmarks)
  print(f"成功写入 {added_count} 个书签，跳过 {updated_count} 个已存在的书签")

  db_conn.close()
  print("\n处理完成!")
