# from sqlalchemy import create_engine
from .models import NewsNew, NewsCategory
from celery import task

@task
def da_new(df):
    """Celery task: persist the news rows of *df* into the database.

    Expects *df* to contain at least the columns 'title', 'url',
    'ctime' and 'category'. Each row is saved as a NewsNew record,
    linked to its NewsCategory when the category name is known.

    Returns the string 'success' once every row has been processed.
    """
    # Map each category name to its NewsCategory id (None when the name
    # is not in the category table). The column is named 'categroy_id'
    # to match the model's (misspelled) field name — do not "fix" it.
    df['categroy_id'] = df['category'].apply(sr_new)

    df = df[['title', 'url', 'ctime', 'categroy_id']]
    df = df.rename(columns={'ctime': 'new_time'})

    # Option 1 (disabled): bulk-insert via pandas.to_sql — faster, but the
    # whole batch lands at once, so a user refreshing the page never sees
    # the news list grow incrementally.
    # conn = create_engine('mysql+pymysql://root:root@localhost:3306/task_config_pro?charset=utf8')
    # df.to_sql('news',conn,index=False,if_exists='append')

    # Option 2: insert row by row so the page reflects progress on refresh.
    for _, row in df.iterrows():
        title = row['title']
        url = row['url']
        new_time = row['new_time']
        categroy_id = row['categroy_id']
        # sr_new returns None for unknown categories; depending on the
        # column dtype pandas stores that as None (object) or NaN (float,
        # which compares unequal to itself) — test for both.
        if categroy_id is None or categroy_id != categroy_id:
            # Category is optional: save the record without a category link.
            NewsNew(title=title, url=url, new_time=new_time).save()
        else:
            # pandas may upcast the id column to float; the FK lookup needs int.
            cate = NewsCategory.objects.get(id=int(categroy_id))
            NewsNew(title=title, url=url, new_time=new_time,
                    categroy=cate).save()

    return 'success'


def sr_new(sr):
    """Return the id of the NewsCategory named *sr*, or None if absent.

    Used when importing news rows: the returned id (or None) is stored
    in the DataFrame's 'categroy_id' column.
    """
    # first() yields None instead of raising when no row matches.
    category = NewsCategory.objects.filter(name=sr).first()
    # Avoid shadowing the builtin `id`; return the pk or None directly.
    return category.id if category else None
