from crawler.startup import *
from .cards import *
from datetime import datetime, timedelta

# Numeric card id for this crawler; also used as the primary-key lookup in run().
# NOTE(review): `id` shadows the builtin `id()` for the rest of this module.
id = 5
# Card registry row describing the HIT graduate-admissions announcement list page.
Card_hityzb = Card(id=id, name='hityzb', cardname='HIT 招生公告', url="http://yzb.hit.edu.cn/8822/list.htm")


class Hityzb(Base):
    """ORM model: one row per HIT graduate-admissions announcement."""
    __tablename__ = 'hityzb'
    # Announcement title; doubles as the primary key, so re-crawled items
    # with the same title are deduplicated by session.merge() in run().
    title = Column(String(100), primary_key=True)
    # Absolute link to the announcement page.
    url = Column(String(200))
    # Publish date parsed from the listing (column comment '创建时间' = "creation time").
    time = Column(DateTime, comment='创建时间')
    __mapper_args__ = {
        # Default query ordering: newest announcements first.
        # NOTE(review): the "order_by" mapper argument was deprecated and later
        # removed in modern SQLAlchemy — confirm the pinned version supports it.
        "order_by": time.desc()
    }


# Core Table object mirroring the Hityzb model; registered on `metadata` so
# metadata.create_all(engine) in run() can create the backing table on first launch.
Table_hityzb = Table(
    'hityzb',
    metadata,
    Column('title', String(100), primary_key=True),
    Column('url', String(200)),
    Column('time', DateTime),
)


def run():
    """Crawl the HIT graduate-admissions notice list and upsert rows into `hityzb`.

    Side effects: registers the card / creates tables on first run, merges one
    Hityzb row per scraped announcement, and re-merges the Card row at the end.
    Skips the crawl entirely when the card is locked or was refreshed less than
    3 hours ago.
    """
    res = session.query(Card).filter_by(id=id).first()
    if res is None:
        # First run: register this card and make sure the backing table exists.
        session.add_all([Card_hityzb])
        metadata.create_all(engine)
    elif res.is_lock:
        print(res.is_lock)
        return
    elif res.updatetime and datetime.now() - res.updatetime < timedelta(hours=3):
        print('updatetime is so short')
        return
    driver.implicitly_wait(10)
    try:
        # If the page fails to load within 10 seconds an exception is raised;
        # otherwise the lookups below return normally.
        driver.get(Card_hityzb.url)
        links = driver.find_elements_by_xpath('/html/body/div[3]/div/table/tbody/tr[2]/td/table/tbody/tr/td[3]/table/tbody/tr[4]/td/div/div[1]/table/tbody//td/span[3]/a')
        stamps = driver.find_elements_by_xpath("/html/body/div[3]/div/table/tbody/tr[2]/td/table/tbody/tr/td[3]/table/tbody/tr[4]/td/div/div[1]/table/tbody//td/span[1]")
        # Pair each notice link with its publish date. zip() stops at the shorter
        # list, so a length mismatch between the two XPath results can no longer
        # raise IndexError (the old `range(len(data))` loop could).
        for link, stamp in zip(links, stamps):
            session.merge(Hityzb(title=link.text,
                                 url=link.get_attribute('href'),
                                 time=datetime.strptime(stamp.text, "%Y年%m月%d日")))
    except Exception as e:
        # Best-effort crawl: log the failure and still fall through so the
        # Card row below is refreshed.
        print(e)
    session.merge(Card_hityzb)
