# coding=utf8


import sqlalchemy
import pandas as pd


class Demo:
    """Demonstrate pandas' three SQL readers against a SQLite database.

    ``read_sql`` is a convenience wrapper that routes a SQL query to
    ``read_sql_query`` and a table name to ``read_sql_table``; the two
    specific readers are then shown with SQLAlchemy connections.
    Every demo expects a ``students.db`` SQLite file containing a
    populated ``score`` table in the working directory.
    """

    @staticmethod
    def main():
        """Run each reader demo in turn."""
        Demo.read_sql()
        Demo.read_sql_query()
        Demo.read_sql_table()

    @staticmethod
    def read_sql():
        """Read the ``score`` table with :func:`pandas.read_sql` over a sqlite3 DBAPI connection.

        ``pd.read_sql(sql, con, ...)`` delegates to ``read_sql_query`` for a
        query string/Selectable and to ``read_sql_table`` for a table name.
        Useful optional keywords: ``index_col`` to promote column(s) to the
        (Multi)Index, ``coerce_float`` (default ``True``) to convert
        decimal-like objects to float, ``params`` for driver-style bind
        parameters, ``parse_dates`` for date conversion, ``columns`` to
        restrict table reads, and ``chunksize`` to get an iterator of
        DataFrames instead of a single frame.

        Returns
        -------
        pandas.DataFrame
            The full contents of the ``score`` table.
        """
        import sqlite3

        # sqlite3.connect creates the file when missing; the demo assumes
        # students.db already holds a populated `score` table.
        conn = sqlite3.connect('students.db')
        try:
            df = pd.read_sql(sql="""SELECT * FROM score""", con=conn)
        finally:
            # Fix: the original leaked this connection (never closed it),
            # even though the printed transcript claims `conn.close()` ran.
            conn.close()

        print(
            "# read_sql读取数据库数据\n"
            "# 使用sqlite3连接数据库students.db\n"
            " >>> conn = sqlite3.connect('students.db')\n"
            "# 执行SQL查询命令，读取数据到DataFrame数据集\n"
            ' >>> df = pd.read_sql(sql="select * from score", con=conn)\n'
            " >>> print(df)\n"
            f"{df}\n"
            f" >>> conn.close()\n"
        )

        return df

    @staticmethod
    def read_sql_query():
        """Read the ``score`` table with :func:`pandas.read_sql_query` via SQLAlchemy.

        ``pd.read_sql_query(sql, con, ...)`` executes an explicit SQL query
        and returns the result set as a DataFrame (default integer index
        unless ``index_col`` is given). It shares ``coerce_float``,
        ``params``, ``parse_dates`` and ``chunksize`` with ``read_sql``;
        timezone-aware datetimes parsed via ``parse_dates`` are converted
        to UTC.

        Returns
        -------
        None
        """
        # Fix: create_engine(encoding=...) was deprecated in SQLAlchemy 1.4
        # and removed in 2.0; text handling is governed by the URL/driver.
        engine = sqlalchemy.create_engine("sqlite+pysqlite:///students.db")
        conn = engine.connect()
        try:
            df = pd.read_sql_query(sql="select * from score", con=conn)
        finally:
            conn.close()

        print(
            "# read_sql_query读取数据库数据\n"
            "# 使用驱动模块pysqlite作为连接数据库引擎, 连接数据库students.db\n"
            " >>> engine = sqlalchemy.create_engine('sqlite+pysqlite:///students.db')\n"
            " >>> conn = engine.connect()\n"
            "# 执行SQL查询命令，读取数据到DataFrame数据集\n"
            ' >>> df = pd.read_sql_query(sql="select * from score", con=conn)\n'
            " >>> print(df)\n"
            f"{df}\n"
            f" >>> conn.close()\n"
        )
        return

    @staticmethod
    def read_sql_table():
        """Read the ``score`` table with :func:`pandas.read_sql_table`, first from SQLite then from MySQL.

        ``pd.read_sql_table(table_name, con, ...)`` fetches a whole table by
        name and requires a SQLAlchemy connectable (DBAPI connections are
        not supported). Optional keywords mirror ``read_sql``: ``schema``,
        ``index_col``, ``coerce_float``, ``parse_dates``, ``columns`` and
        ``chunksize``. Timezone-aware datetime columns are converted to UTC.

        The MySQL half of the demo needs a reachable server at
        127.0.0.1:3306 with a ``students`` database (driver: pymysql).

        Returns
        -------
        None
        """
        # Fix: dropped the deprecated/removed create_engine(encoding=...) kwarg.
        engine = sqlalchemy.create_engine("sqlite+pysqlite:///students.db")
        conn = engine.connect()
        try:
            df = pd.read_sql_table("score", con=conn)
        finally:
            conn.close()
        print(
            "# read_sql_table读取Sqlite数据库数据\n"
            "# 读取sqlite接数据库\n"
            " >>> engine = sqlalchemy.create_engine('sqlite+pysqlite:///students.db')\n"
            " >>> conn = engine.connect()\n"
            ' >>> df = pd.read_sql_table("score", con=conn)\n'
            " >>> print(df)\n"
            f"{df}\n"
            f" >>> conn.close()"
        )

        # MySQL URL pattern: mysql+driver://user:password@host:3306/database?arg=value
        conn_str = "mysql+pymysql://syntax:syntax@127.0.0.1:3306/students?charset=utf8"
        engine = sqlalchemy.create_engine(conn_str)
        conn = engine.connect()
        try:
            df = pd.read_sql_table("score", con=conn)
        finally:
            conn.close()
        print(
            f'# 使用Pandas.read_sql_table读取MySql数据库数据\n'
            f"# mysql连接数据库模式：mysql+数据库驱动://用户名：密码@localhost:3306/数据库？参数=参数值\n"
            f' >>> conn_str = "mysql+pymysql://syntax:syntax@127.0.0.1:3306/students?charset=utf8"\n'
            f' >>> engine = sqlalchemy.create_engine(conn_str)\n'
            f' >>> conn = engine.connect()\n'
            f' >>> df = pd.read_sql_table("score", con=conn)\n'
            f' >>> print(df)\n'
            f"{df}\n"
            f" >>> conn.close()"
        )

        return
