from crawler.startup import *
from .cards import *
from datetime import datetime, timedelta
# Registry row describing this crawler source (id=1); queried and merged back in run().
Card_chinakaoyan = Card(id=1, name='chinakaoyan', cardname='中国考研网', url="http://www.chinakaoyan.com/info/list/ClassID/9.shtml")


class ChinaKaoYan(Base):
    """Declarative model for articles scraped from chinakaoyan.com.

    One row per article. `title` doubles as the primary key, so
    re-scraping the same headline upserts via session.merge() in run().
    """
    __tablename__ = 'chinakaoyan'
    title = Column(String(100), primary_key=True)  # article headline; primary key
    url = Column(String(200))                      # absolute link to the article
    time = Column(DateTime, comment='创建时间')
    # NOTE(review): mapper-level "order_by" was deprecated in SQLAlchemy 1.0
    # and removed in 1.1 — this only works on legacy versions; confirm the
    # pinned SQLAlchemy, otherwise order queries explicitly with .order_by().
    __mapper_args__ = {
        "order_by": time.desc()
    }


# Core-style Table mirroring the ChinaKaoYan model, registered on the shared
# `metadata` so metadata.create_all(engine) in run() creates it on first use.
Table_chinakaoyan = Table('chinakaoyan', metadata,
              Column('title', String(100), primary_key=True),
              Column('url', String(200)),
              Column('time', DateTime))


def run():
    """Scrape the chinakaoyan.com news listing and upsert the articles.

    Flow:
      1. Ensure the source Card (id=1) exists; create tables on first run.
      2. Skip when the card is locked, or when it was updated < 3 hours ago.
      3. Load the listing page with Selenium and merge every matched <a>
         into ChinaKaoYan (title is the primary key, so this upserts).
      4. Stamp the card's updatetime and merge it back so the throttle works.

    Side effects only (DB writes via `session`, browser navigation via
    `driver`); returns None.
    """
    res = session.query(Card).filter_by(id=1).first()
    if res is None:
        # First run for this source: register the card and create the tables.
        session.add_all([Card_chinakaoyan])
        metadata.create_all(engine)
    elif res.is_lock:
        # Another worker holds the lock; bail out.
        print(res.is_lock)
        return
    elif res.updatetime and datetime.now() - res.updatetime < timedelta(hours=3):
        # Throttle: skip if the last run finished under 3 hours ago.
        print('updatetime is so short')
        return
    # Wait up to 10 seconds for elements to appear before find_* gives up.
    driver.implicitly_wait(10)
    try:
        driver.get(Card_chinakaoyan.url)
        # NOTE(review): find_elements_by_xpath was removed in Selenium 4 —
        # confirm the pinned selenium; 4.x needs find_elements(By.XPATH, ...).
        data = driver.find_elements_by_xpath('/html/body/div[4]/div[3]/div[1]/div[2]/ul//a')
        for x in data:
            # title is the primary key, so merge() upserts repeated headlines.
            session.merge(ChinaKaoYan(title=x.text, url=x.get_attribute('href'), time=func.now()))
    except Exception as e:
        # Best-effort crawl: log and fall through so the card is still
        # re-merged (this also throttles retries of a failing source).
        print(e)
    # Fix: stamp updatetime before merging — previously the card was merged
    # with updatetime unset, so the 3-hour throttle above could never trip.
    Card_chinakaoyan.updatetime = datetime.now()
    session.merge(Card_chinakaoyan)
    # TODO(review): no session.commit() here — verify the caller commits,
    # otherwise none of the merges above persist.
