package com.openess.bigsearch.engine.DAO;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import com.openess.bigsearch.engine.search.IndexSearcher.DocScore;

public class MysqlManager implements DBManager {
	private PreparedStatement pstat;
	private ResultSet rs;
	private Connection conn;
	/**
	 * Page counter: index of the next page to load (page 0 is loaded by init()).
	 */
	private int pageCount = 1;
	/**
	 * Number of rows fetched from the crawler table per page.
	 */
	public static final int MaxCount = 2000;

	/**
	 * Opens the database connection and prepares the statement for the given
	 * usage mode (SEARCH or INDEX).
	 *
	 * @param type usage mode of this manager
	 */
	public MysqlManager(Type type) {
		init(type);
	}

	/**
	 * Opens a connection to the bigsearch MySQL database. Connection settings
	 * default to the original built-in values but may be overridden via the
	 * system properties {@code bigsearch.db.url}, {@code bigsearch.db.user}
	 * and {@code bigsearch.db.password}.
	 *
	 * NOTE(review): the fallback credentials are hard-coded; supply them
	 * externally in production rather than shipping them in the source.
	 *
	 * @return a freshly opened JDBC connection; the caller must close it
	 * @throws SQLException if the connection cannot be established
	 * @throws ClassNotFoundException if the MySQL driver is not on the classpath
	 */
	public static Connection getConnection() throws SQLException,
			ClassNotFoundException {
		// Register the driver class. "com.mysql.jdbc.Driver" is the legacy
		// Connector/J name; version 8+ renamed it to "com.mysql.cj.jdbc.Driver".
		Class.forName("com.mysql.jdbc.Driver");
		String url = System.getProperty("bigsearch.db.url",
				"jdbc:mysql://localhost:3306/bigsearch");
		String username = System.getProperty("bigsearch.db.user", "root");
		String password = System.getProperty("bigsearch.db.password", "xs1988130");
		return DriverManager.getConnection(url, username, password);
	}

	/**
	 * Establishes the connection and prepares the statement matching the
	 * usage mode. In INDEX mode the first page of the crawler table is loaded
	 * immediately so that readDBData() can be called right away.
	 *
	 * @param type usage mode of this manager
	 */
	public void init(Type type) {
		try {
			conn = getConnection();
			if (type == Type.SEARCH) {
				// NOTE(review): an empty SQL string is prepared here (some
				// drivers reject it); readTop() now prepares its own statement,
				// so this placeholder is kept only for compatibility.
				pstat = conn.prepareStatement("");
			} else if (type == Type.INDEX) {
				// Page through the crawler table MaxCount rows at a time.
				pstat = conn.prepareStatement("SELECT * FROM crawler LIMIT ?, ?");
				readPage(0);
			}
		} catch (SQLException | ClassNotFoundException e) {
			// A failed init leaves this manager unusable; readDBData() will
			// then return null. Printing (not rethrowing) preserves the
			// original error-handling contract of this class.
			e.printStackTrace();
		}
	}

	/**
	 * Loads one page (MaxCount rows) of the crawler table into {@code rs}.
	 *
	 * @param page zero-based page index; the query skips page * MaxCount rows
	 * @throws SQLException if the query fails
	 */
	private void readPage(int page) throws SQLException {
		// Close the previous page's ResultSet before opening a new one;
		// the original code leaked it on every page turn.
		if (rs != null) {
			rs.close();
		}
		pstat.setInt(1, page * MaxCount);
		pstat.setInt(2, MaxCount);
		rs = pstat.executeQuery();
	}

	/**
	 * Maps the current row of the given ResultSet to a DBData record.
	 *
	 * @param row a ResultSet positioned on a crawler-table row
	 * @return the row's columns wrapped in a DBData
	 * @throws SQLException if a column cannot be read
	 */
	private static DBData toDBData(ResultSet row) throws SQLException {
		return new DBData(row.getString("id"), row.getString("url"),
				row.getString("title"), row.getString("keywords"),
				row.getString("description"), row.getString("content"));
	}

	/**
	 * Returns the next crawler-table row, transparently fetching the next
	 * page from the database when the current page is exhausted.
	 *
	 * @return the next row as a DBData, or null when no rows remain or a
	 *         database error occurred
	 */
	public DBData readDBData() {
		try {
			boolean hasNext = rs.next();
			if (!hasNext) {
				// Current page exhausted: load the next page and retry.
				readPage(pageCount++);
				hasNext = rs.next();
			}
			if (hasNext) {
				return toDBData(rs);
			}
		} catch (SQLException e) {
			e.printStackTrace();
		}
		return null;
	}

	/**
	 * Fetches the full crawler rows for the given top-scoring documents and
	 * replaces each entry of {@code DSList} with a DocScore carrying the row
	 * data plus the previously computed score (the summary is not yet
	 * computed at this point, so it stays unset).
	 *
	 * Fixes over the previous version: the SQL is now a parameterized IN
	 * clause (the old code concatenated ids into the string — injection-prone
	 * — and called PreparedStatement.executeQuery(String), which the JDBC
	 * specification requires to throw SQLException), and the lookup uses its
	 * own statement/ResultSet instead of clobbering the paging state.
	 *
	 * NOTE(review): rows are paired with DSList entries by position, not by
	 * id, mirroring the original behavior — confirm the result order matches
	 * the requested order.
	 *
	 * @param DSList the top-scoring documents; modified in place
	 * @return the same array, with entries replaced where rows were found
	 */
	public DocScore[] readTop(DocScore[] DSList) {
		if (DSList == null || DSList.length == 0) {
			return DSList; // nothing to look up (old code threw AIOOBE here)
		}
		// Build "SELECT * FROM crawler WHERE id IN (?, ?, ...)" with one
		// placeholder per requested document.
		StringBuilder sql = new StringBuilder("SELECT * FROM crawler WHERE id IN (");
		for (int i = 0; i < DSList.length; i++) {
			sql.append(i == 0 ? "?" : ", ?");
		}
		sql.append(')');
		try (PreparedStatement ps = conn.prepareStatement(sql.toString())) {
			for (int i = 0; i < DSList.length; i++) {
				ps.setString(i + 1, DSList[i].docID);
			}
			try (ResultSet top = ps.executeQuery()) {
				int i = 0;
				while (i < DSList.length && top.next()) {
					DocScore replaced = new DocScore(toDBData(top));
					replaced.setScore(DSList[i].getScore());
					DSList[i] = replaced;
					i++;
				}
			}
		} catch (SQLException e) {
			e.printStackTrace();
		}
		return DSList;
	}

	/**
	 * Releases all JDBC resources held by this manager. Each resource is
	 * closed independently, so a failure (or a null left by a failed init)
	 * on one does not leak the others — the original version NPE'd on null
	 * fields and stopped at the first close failure.
	 */
	public void close() {
		closeQuietly(rs);
		closeQuietly(pstat);
		closeQuietly(conn);
	}

	/**
	 * Closes a single resource, tolerating null and printing (not
	 * propagating) any error, consistent with this class's error handling.
	 *
	 * @param resource the resource to close; may be null
	 */
	private static void closeQuietly(AutoCloseable resource) {
		if (resource == null) {
			return;
		}
		try {
			resource.close();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
