from sqlalchemy import Column, Integer, String, Text, DateTime, Boolean, SmallInteger, BigInteger, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from datetime import datetime

# Declarative base class shared by every ORM model in this module.
# NOTE(review): ``sqlalchemy.ext.declarative.declarative_base`` is the
# pre-1.4 import location; newer SQLAlchemy exposes it as
# ``sqlalchemy.orm.declarative_base`` — confirm the installed version
# before switching.
Base = declarative_base()


class CrawlSource(Base):
    """ORM mapping for the ``crawl_source`` table.

    Each row describes one crawlable source site: its display name, a
    JSON-encoded crawl rule, and an on/off status flag.
    """

    __tablename__ = 'crawl_source'

    # Surrogate primary key.
    id = Column('id', Integer, autoincrement=True, primary_key=True, comment='主键')
    # Display name of the source site.
    source_name = Column('source_name', String(50), comment='源站名', nullable=True)
    # Crawl rule, stored as a JSON string.
    crawl_rule = Column('crawl_rule', Text, comment='爬取规则（json串）', nullable=True)
    # Status flag: 0 = disabled (default), 1 = enabled.
    source_status = Column('source_status', SmallInteger, comment='爬虫源状态，0：关闭，1：开启', default=0)
    # Creation timestamp; no DB default, presumably set by application code.
    create_time = Column('create_time', DateTime, comment='创建时间', nullable=True)
    # Refreshed to the DB clock on every UPDATE via ``onupdate``;
    # NULL until the row is first updated.
    update_time = Column('update_time', DateTime, comment='更新时间', nullable=True, onupdate=func.now())


class CrawlSingleTask(Base):
    """ORM mapping for the ``crawl_single_task`` table.

    One row per single-book crawl task: which source/book it targets,
    its execution status, and how many attempts have been made.
    """

    __tablename__ = 'crawl_single_task'

    # Surrogate primary key.
    id = Column('id', Integer, autoincrement=True, primary_key=True, comment='主键')
    # ID of the crawl source this task belongs to.
    source_id = Column('source_id', Integer, comment='爬虫源ID', nullable=True)
    # Name of the crawl source.
    source_name = Column('source_name', String(50), comment='爬虫源名', nullable=True)
    # Book ID as known on the source site.
    source_book_id = Column('source_book_id', String(255), comment='源站小说ID', nullable=True)
    # Book ID in the local ``book`` table.
    book_id = Column('book_id', String(255), comment='book表小说ID', nullable=True)
    # Category ID.
    cat_id = Column('cat_id', Integer, comment='分类ID', nullable=True)
    # Crawled book title.
    book_name = Column('book_name', String(50), comment='爬取的小说名', nullable=True)
    # Crawled author name.
    author_name = Column('author_name', String(50), comment='爬取的小说作者名', nullable=True)
    # Task status: 0 = failed, 1 = succeeded, 2 = not yet executed (default).
    task_status = Column('task_status', SmallInteger, comment='任务状态，0：失败，1：成功，2；未执行', default=2)
    # Attempts made so far; per the column comment, capped at 5 elsewhere.
    exc_count = Column('exc_count', SmallInteger, comment='已经执行次数，最多执行5次', default=0)
    # Creation timestamp; no DB default, presumably set by application code.
    create_time = Column('create_time', DateTime, comment='创建时间', nullable=True)

# NOTE(review): commented-out reference copy of the ``book`` table mapping —
# presumably that table is managed elsewhere; verify before enabling. Several
# imports above (Boolean, BigInteger, Float) exist only for this block.
# class Book(Base):
#     __tablename__ = 'book'
#
#     id = Column(BigInteger, primary_key=True, autoincrement=True)
#     work_direction = Column(Integer, nullable=True, comment='作品方向，0：男频，1：女频')
#     cat_id = Column(Integer, nullable=True, comment='分类ID')
#     cat_name = Column(String(50), nullable=True, comment='分类名')
#     pic_url = Column(String(200), nullable=True, comment='小说封面')
#     book_name = Column(String(50), nullable=False, comment='小说名')
#     author_id = Column(BigInteger, nullable=True, comment='作者id')
#     author_name = Column(String(50), nullable=False, comment='作者名')
#     book_desc = Column(String(2000), nullable=False, comment='书籍描述')
#     score = Column(Float, nullable=True, comment='评分')
#     book_status = Column(Integer, default=0, comment='书籍状态，0：连载中，1：已完结')
#     visit_count = Column(BigInteger, default=103, comment='点击量')
#     word_count = Column(Integer, nullable=True, comment='总字数')
#     comment_count = Column(Integer, default=0, comment='评论数')
#     yesterday_buy = Column(Integer, default=0, comment='昨日订阅数')
#     last_index_id = Column(BigInteger, nullable=True, comment='最新目录ID')
#     last_index_name = Column(String(50), nullable=True, comment='最新目录名')
#     last_index_update_time = Column(DateTime, nullable=True, comment='最新目录更新时间')
#     is_vip = Column(Boolean, default=False, comment='是否收费')
#     status = Column(Boolean, default=False, comment='状态')
#     update_time = Column(DateTime, nullable=False, comment='更新时间')
#     create_time = Column(DateTime, nullable=True, comment='创建时间')
#     crawl_source_id = Column(Integer, nullable=True, comment='爬虫源站ID')
#     crawl_book_id = Column(String(32), nullable=True, comment='抓取的源站小说ID')
#     crawl_last_time = Column(DateTime, nullable=True, comment='最后一次的抓取时间')
#     crawl_is_stop = Column(Boolean, default=False, comment='是否已停止更新')
