# import psycopg2
# from openai import OpenAI
# import pandas as pd
# import time

# # 初始化OpenAI API
# client = OpenAI(
#     api_key="sk-FB9VFg4kAJfs4qCDLlQ10XRxJeNVzNIenLDJw8V9YqH1gzRF",
#     base_url="https://api.chatanywhere.tech/v1"
# )

# # 数据库连接参数
# connection_params = {
#     'dbname': 'imdb_database',
#     'user': 'postgres',
#     'password': '0818',
#     'host': 'localhost',
#     'port': '5432'
# }

# def connect_to_db(params):
#     try:
#         connection = psycopg2.connect(**params)
#         print("成功连接到数据库")
#         return connection
#     except Exception as e:
#         print(f"连接数据库失败: {e}")
#         return None

# def generate_query_prompt():
#     prompt = """
# Generate 200 SQL queries based on the following table structure. Ensure all queries are syntactically correct for PostgreSQL, with JOINs on at least three tables and filter conditions on columns.
#  Do not include sequence numbers, comments, or extraneous characters like quotes or symbols. Only return the SQL queries.

# table structure:

#     table structure：
#     1. aka_name 表：
#        - id：整数，非空，主键
#        - person_id：整数，非空
#        - name：可变字符类型
#        - imdb_index：可变字符类型(3)
#        - name_pcode_cf：可变字符类型(11)
#        - name_pcode_nf：可变字符类型(11)
#        - surname_pcode：可变字符类型(11)
#        - md5sum：可变字符类型(65)

#     2. aka_title 表：
#        - id：整数，非空，主键
#        - movie_id：整数，非空
#        - title：可变字符类型
#        - imdb_index：可变字符类型(4)
#        - kind_id：整数，非空
#        - production_year：整数
#        - phonetic_code：可变字符类型(5)
#        - episode_of_id：整数
#        - season_nr：整数
#        - episode_nr：整数
#        - note：可变字符类型(72)
#        - md5sum：可变字符类型(32)

#     3. cast_info 表：
#        - id：整数，非空，主键
#        - person_id：整数，非空
#        - movie_id：整数，非空
#        - person_role_id：整数
#        - note：可变字符类型
#        - nr_order：整数
#        - role_id：整数，非空

#     4. char_name 表：
#        - id：整数，非空，主键
#        - name：可变字符类型，非空
#        - imdb_index：可变字符类型(2)
#        - imdb_id：整数
#        - name_pcode_nf：可变字符类型(5)
#        - surname_pcode：可变字符类型(5)
#        - md5sum：可变字符类型(32)

#     5. comp_cast_type 表：
#        - id：整数，非空，主键
#        - kind：可变字符类型(32)，非空

#     6. company_name 表：
#        - id：整数，非空，主键
#        - name：可变字符类型，非空
#        - country_code：可变字符类型(6)
#        - imdb_id：整数
#        - name_pcode_nf：可变字符类型(5)
#        - name_pcode_sf：可变字符类型(5)
#        - md5sum：可变字符类型(32)

#     7. company_type 表：
#        - id：整数，非空，主键
#        - kind：可变字符类型(32)

#     8. complete_cast 表：
#        - id：整数，非空，主键
#        - movie_id：整数
#        - subject_id：整数，非空
#        - status_id：整数，非空

#     9. info_type 表：
#        - id：整数，非空，主键
#        - info：可变字符类型(32)，非空

#     10. keyword 表：
#         - id：整数，非空，主键
#         - keyword：可变字符类型，非空
#         - phonetic_code：可变字符类型(5)

#     11. kind_type 表：
#         - id：整数，非空，主键
#         - kind：可变字符类型(15)

#     12. link_type 表：
#         - id：整数，非空，主键
#         - link：可变字符类型(32)，非空

#     13. movie_companies 表：
#         - id：整数，非空，主键
#         - movie_id：整数，非空
#         - company_id：整数，非空
#         - company_type_id：整数，非空
#         - note：可变字符类型

#     14. movie_info_idx 表：
#         - id：整数，非空，主键
#         - movie_id：整数，非空
#         - info_type_id：整数，非空
#         - info：可变字符类型，非空
#         - note：可变字符类型(1)

#     15. movie_keyword 表：
#         - id：整数，非空，主键
#         - movie_id：整数，非空
#         - keyword_id：整数，非空

#     16. movie_link 表：
#         - id：整数，非空，主键
#         - movie_id：整数，非空
#         - linked_movie_id：整数，非空
#         - link_type_id：整数，非空

#     17. name 表：
#         - id：整数，非空，主键
#         - name：可变字符类型，非空
#         - imdb_index：可变字符类型(9)
#         - imdb_id：整数
#         - gender：可变字符类型(1)
#         - name_pcode_cf：可变字符类型(5)
#         - name_pcode_nf：可变字符类型(5)
#         - surname_pcode：可变字符类型(5)
#         - md5sum：可变字符类型(32)

#     18. role_type 表：
#         - id：整数，非空，主键
#         - role：可变字符类型(32)，非空

#     19. title 表：
#         - id：整数，非空，主键
#         - title：可变字符类型，非空
#         - imdb_index：可变字符类型(5)
#         - kind_id：整数，非空
#         - production_year：整数
#         - imdb_id：整数
#         - phonetic_code：可变字符类型(5)
#         - episode_of_id：整数
#         - season_nr：整数
#         - episode_nr：整数
#         - series_years：可变字符类型(49)
#         - md5sum：可变字符类型(32)

#     20. movie_info 表：
#         - id：整数，非空，主键
#         - movie_id：整数，非空
#         - info_type_id：整数，非空
#         - info：可变字符类型，非空
#         - note：可变字符类型

#     21. person_info 表：
#         - id：整数，非空，主键
#         - person_id：整数，非空
#         - info_type_id：整数，非空
#         - info：可变字符类型，非空
#         - note：可变字符类型
#     """
#     return prompt

# def clean_query(query):
#     # 清除序号、注释符号和多余字符
#     cleaned_query = query.replace("```sql", "").replace("```", "").strip()
    
#     # 使用正则表达式移除可能的序号
#     import re
#     cleaned_query = re.sub(r'-- Query \d+', '', cleaned_query)
#     cleaned_query = re.sub(r'\d+\.\s*', '', cleaned_query)  # 清除像 "12." 这样的序号
    
#     # 确保没有多余的空格或行
#     cleaned_query = ' '.join(cleaned_query.split())
    
#     # 返回去除多余字符的 SQL 查询
#     return cleaned_query

# def generate_queries(prompt, num_queries=200):
#     response = client.chat.completions.create(
#         model="gpt-4",
#         messages=[
#             {"role": "user", "content": prompt}
#         ],
#         max_tokens=4096,
#         n=1
#     )
    
#     raw_content = response.choices[0].message.content.strip()
    
#     # 用分号分割查询，并清理每个查询
#     queries = [clean_query(query) for query in raw_content.split(';') if query.strip()]
    
#     # 只返回指定数量的查询
#     return queries[:num_queries]


# def validate_query(cursor, connection, query):
#     try:
#         cursor.execute(f"EXPLAIN {query}")
#         explain_output = cursor.fetchall()
#         connection.commit()  # 提交事务
#         return True, explain_output
#     except Exception as e:
#         connection.rollback()  # 回滚事务
#         return False, str(e)

# def main():
#     connection = connect_to_db(connection_params)
#     if not connection:
#         return

#     cursor = connection.cursor()
    
#     prompt = generate_query_prompt()
    
#     start_time = time.time()
#     queries = generate_queries(prompt)
#     end_time = time.time()

#     generation_time = end_time - start_time
#     avg_generation_time = generation_time / len(queries)

#     log_file_path = r"D:\database\queries18.sql"
#     with open(log_file_path, 'w') as f:
#         for idx, query in enumerate(queries):
#             f.write(f"-- Query {idx + 1}\n")
#             f.write(query + ";\n\n")

#     results = []
#     for idx, query in enumerate(queries):
#         valid, explain_output_or_error = validate_query(cursor, connection, query)
#         if valid:
#             validation_result = "Passed"
#             explain_output = explain_output_or_error
#         else:
#             validation_result = "Failed"
#             explain_output = explain_output_or_error
        
#         results.append({
#             "Query ID": idx + 1,
#             "Generated SQL Query": query,
#             "Generation Time": avg_generation_time,
#             "Validation Result": validation_result,
#             "EXPLAIN Output": explain_output if validation_result == "Passed" else "",
#             "Error Message": explain_output if validation_result == "Failed" else "",
#             "Prompt": prompt,
#         })

#     df = pd.DataFrame(results)
#     excel_file_path = r"C:\Users\Aphro\Desktop\query_log18.xlsx"
#     df.to_excel(excel_file_path, index=False)

#     cursor.close()
#     connection.close()
#     print(f"查询已保存到 {log_file_path}")
#     print(f"查询结果已保存到 {excel_file_path}")

# if __name__ == "__main__":
#     main()











# import psycopg2
# import pandas as pd
# import time
# from openai import OpenAI

# # 初始化OpenAI API
# client = OpenAI(
#     api_key="sk-FB9VFg4kAJfs4qCDLlQ10XRxJeNVzNIenLDJw8V9YqH1gzRF",
#     base_url="https://api.chatanywhere.tech/v1"
# )

# # 数据库连接参数
# connection_params = {
#     'dbname': 'imdb_database',
#     'user': 'postgres',
#     'password': '0818',
#     'host': 'localhost',
#     'port': '5432'
# }

# def connect_to_db(params):
#     try:
#         connection = psycopg2.connect(**params)
#         print("成功连接到数据库")
#         return connection
#     except Exception as e:
#         print(f"连接数据库失败: {e}")
#         return None

# def generate_query_prompt():
#     prompt = """
# Generate 200 SQL queries based on the following table structure. 
# Ensure all queries are syntactically correct for PostgreSQL, with JOINs on at least five tables and filter conditions on columns.
# Do not include sequence numbers, comments, or extraneous characters like quotes or symbols. Only return the SQL queries.

# table structure：

# ... (省略其他表结构)
# """
#     return prompt

# def count_joins(query):
#     # 计算查询中的 JOIN 数量
#     join_count = query.upper().count('JOIN')
#     return join_count

# def clean_query(query):
#     # 清除序号、注释符号和多余字符
#     cleaned_query = query.strip()
#     cleaned_query = cleaned_query.split('--', 1)[0].strip()  # 移除--之后的内容
#     cleaned_query = ' '.join(cleaned_query.split())  # 移除多余空格
#     return cleaned_query, count_joins(cleaned_query)

# def generate_queries(prompt, num_queries=200):
#     response = client.chat.completions.create(
#         model="gpt-4",
#         messages=[
#             {"role": "user", "content": prompt}
#         ],
#         max_tokens=8192,
#         n=1
#     )
    
#     raw_content = response.choices[0].message.content.strip()
    
#     queries = [clean_query(query) for query in raw_content.split(';') if query.strip()]
    
#     return queries[:num_queries]

# def validate_query(cursor, connection, query):
#     try:
#         cursor.execute(f"EXPLAIN {query}")
#         explain_output = cursor.fetchall()
#         connection.commit()
#         return True, explain_output
#     except Exception as e:
#         connection.rollback()
#         return False, str(e)

# def correct_query_with_llm(failed_query, error_message):
#     prompt = f"Correct the following SQL query for PostgreSQL based on the error:\n\nQuery: {failed_query}\nError: {error_message}\n"
#     response = client.chat.completions.create(
#         model="gpt-4",
#         messages=[
#             {"role": "user", "content": prompt}
#         ],
#         max_tokens=8192,
#         n=1
#     )
    
#     corrected_query = response.choices[0].message.content.strip()
#     return clean_query(corrected_query)

# def main():
#     connection = connect_to_db(connection_params)
#     if not connection:
#         return

#     cursor = connection.cursor()
    
#     prompt = generate_query_prompt()
    
#     start_time = time.time()
#     queries = generate_queries(prompt)
#     end_time = time.time()

#     generation_time = end_time - start_time
#     avg_generation_time = generation_time / len(queries)

#     log_file_path = r"D:\database\queries20.sql"
#     with open(log_file_path, 'w') as f:
#         for idx, (query, join_count) in enumerate(queries):
#             f.write(f"-- Query {idx + 1}\n")
#             f.write(f"-- Join Count: {join_count}\n")
#             f.write(query + ";\n\n")

#     results = []
#     for idx, (query, join_count) in enumerate(queries):
#         valid, explain_output_or_error = validate_query(cursor, connection, query)
        
#         if not valid:
#             print(f"Query {idx + 1} failed, sending to LLM for correction.")
#             corrected_query, corrected_join_count = correct_query_with_llm(query, explain_output_or_error)
#             valid, explain_output_or_error = validate_query(cursor, connection, corrected_query)
            
#             if valid:
#                 validation_result = "Passed after correction"
#                 explain_output = explain_output_or_error
#             else:
#                 validation_result = "Failed after correction"
#                 explain_output = explain_output_or_error
#         else:
#             validation_result = "Passed"
#             explain_output = explain_output_or_error
        
#         results.append({
#             "Query ID": idx + 1,
#             "Generated SQL Query": query,
#             "Corrected SQL Query": corrected_query if not valid else "",
#             "Join Count": join_count,
#             "Generation Time": avg_generation_time,
#             "Validation Result": validation_result,
#             "EXPLAIN Output": explain_output if validation_result.startswith("Passed") else "",
#             "Error Message": explain_output if validation_result.startswith("Failed") else "",
#             "Prompt": prompt,
#         })

#     df = pd.DataFrame(results)
#     excel_file_path = r"C:\Users\Aphro\Desktop\query_log20.xlsx"
#     df.to_excel(excel_file_path, index=False)

#     cursor.close()
#     connection.close()
#     print(f"查询已保存到 {log_file_path}")
#     print(f"查询结果已保存到 {excel_file_path}")

# if __name__ == "__main__":
#     main()

    

# import psycopg2
# import pandas as pd
# import time
# from openai import OpenAI

# # 初始化OpenAI API
# client = OpenAI(
#     api_key="sk-FB9VFg4kAJfs4qCDLlQ10XRxJeNVzNIenLDJw8V9YqH1gzRF",
#     base_url="https://api.chatanywhere.tech/v1"
# )

# # 数据库连接参数
# connection_params = {
#     'dbname': 'imdb_database',
#     'user': 'postgres',
#     'password': '0818',
#     'host': 'localhost',
#     'port': '5432'
# }

# def connect_to_db(params):
#     try:
#         connection = psycopg2.connect(**params)
#         print("成功连接到数据库")
#         return connection
#     except Exception as e:
#         print(f"连接数据库失败: {e}")
#         return None

# def generate_query_prompt():
#     prompt = """
# Generate 200 SQL queries based on the following table structure. 
# Ensure all queries are syntactically correct for PostgreSQL, with JOINs on at least five tables and filter conditions on columns.
# Do not include sequence numbers, comments, or extraneous characters like quotes or symbols. Only return the SQL queries.

# table structure：

# ... (省略其他表结构)
# """
#     return prompt

# def count_joins(query):
#     # 计算查询中的 JOIN 数量
#     join_count = query.count('JOIN')
#     return join_count

# def clean_query(query):
#     # 清除序号、注释符号和多余字符
#     cleaned_query = query.strip()
#     cleaned_query = cleaned_query.split('--', 1)[0].strip()  # 移除--之后的内容
#     cleaned_query = ' '.join(cleaned_query.split())  # 移除多余空格
#     return cleaned_query, count_joins(cleaned_query)

# def generate_queries(prompt, num_queries=200):
#     response = client.chat.completions.create(
#         model="gpt-4",
#         messages=[
#             {"role": "user", "content": prompt}
#         ],
#         max_tokens=8192,
#         n=1
#     )
    
#     raw_content = response.choices[0].message.content.strip()
    
#     queries = [clean_query(query) for query in raw_content.split(';') if query.strip()]
    
#     return queries[:num_queries]

# def validate_query(cursor, connection, query):
#     try:
#         cursor.execute(f"EXPLAIN {query}")
#         explain_output = cursor.fetchall()
#         connection.commit()
#         return True, explain_output
#     except Exception as e:
#         connection.rollback()
#         return False, str(e)

# def correct_query_with_llm(failed_query, error_message):
#     prompt = f"Correct the following SQL query for PostgreSQL based on the error:\n\nQuery: {failed_query}\nError: {error_message}\n"
#     response = client.chat.completions.create(
#         model="gpt-4",
#         messages=[
#             {"role": "user", "content": prompt}
#         ],
#         max_tokens=8192,
#         n=1
#     )
    
#     corrected_query = response.choices[0].message.content.strip()
#     return clean_query(corrected_query)

# def main():
#     connection = connect_to_db(connection_params)
#     if not connection:
#         return

#     cursor = connection.cursor()
    
#     prompt = generate_query_prompt()
    
#     start_time = time.time()
#     queries = generate_queries(prompt)
#     end_time = time.time()

#     generation_time = end_time - start_time
#     avg_generation_time = generation_time / len(queries)

#     total_joins = 0
#     for query, join_count in queries:
#         total_joins += join_count

#     avg_joins = total_joins / len(queries) if queries else 0

#     log_file_path = r"D:\database\queries22.sql"
#     with open(log_file_path, 'w') as f:
#         for idx, (query, join_count) in enumerate(queries):
#             f.write(f"-- Query {idx + 1}\n")
#             f.write(f"-- Join Count: {join_count}\n")
#             f.write(query + ";\n\n")

#     results = []
#     for idx, (query, join_count) in enumerate(queries):
#         valid, explain_output_or_error = validate_query(cursor, connection, query)
        
#         if not valid:
#             print(f"Query {idx + 1} failed, sending to LLM for correction.")
#             corrected_query, corrected_join_count = correct_query_with_llm(query, explain_output_or_error)
#             valid, explain_output_or_error = validate_query(cursor, connection, corrected_query)
            
#             if valid:
#                 validation_result = "Passed after correction"
#                 explain_output = explain_output_or_error
#             else:
#                 validation_result = "Failed after correction"
#                 explain_output = explain_output_or_error
#         else:
#             validation_result = "Passed"
#             explain_output = explain_output_or_error
        
#         results.append({
#             "Query ID": idx + 1,
#             "Generated SQL Query": query,
#             "Corrected SQL Query": corrected_query if not valid else "",
#             "Join Count": join_count,
#             "Generation Time": avg_generation_time,
#             "Validation Result": validation_result,
#             "EXPLAIN Output": explain_output if validation_result.startswith("Passed") else "",
#             "Error Message": explain_output if validation_result.startswith("Failed") else "",
#             "Prompt": prompt,
#         })

#     df = pd.DataFrame(results)
#     excel_file_path = r"C:\Users\Aphro\Desktop\query_log22.xlsx"
#     df.to_excel(excel_file_path, index=False)

#     cursor.close()
#     connection.close()
#     print(f"查询已保存到 {log_file_path}")
#     print(f"查询结果已保存到 {excel_file_path}")
#     print(f"平均 JOIN 数量: {avg_joins}")

# if __name__ == "__main__":
#     main()

# import psycopg2
# import pandas as pd
# import time
# from openai import OpenAI

# # 初始化OpenAI API
# client = OpenAI(
#     api_key="sk-FB9VFg4kAJfs4qCDLlQ10XRxJeNVzNIenLDJw8V9YqH1gzRF",
#     base_url="https://api.chatanywhere.tech/v1"
# )

# # 数据库连接参数
# connection_params = {
#     'dbname': 'imdb_database',
#     'user': 'postgres',
#     'password': '0818',
#     'host': 'localhost',
#     'port': '5432'
# }

# def connect_to_db(params):
#     try:
#         connection = psycopg2.connect(**params)
#         print("成功连接到数据库")
#         return connection
#     except Exception as e:
#         print(f"连接数据库失败: {e}")
#         return None

# def generate_query_prompt():
#     # ... 省略其他内容 ...
#     return prompt

# def count_joins(query):
#     # 计算查询中的 JOIN 数量
#     join_keywords = ['JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'INNER JOIN', 'FULL OUTER JOIN']
#     join_count = 0
#     for keyword in join_keywords:
#         join_count += query.count(keyword)
#     return join_count

# def clean_query(query):
#     # 清除序号、注释符号和多余字符
#     cleaned_query = query.strip()
#     cleaned_query = cleaned_query.split('--', 1)[0].strip()  # 移除--之后的内容
#     cleaned_query = ' '.join(cleaned_query.split())  # 移除多余空格
#     return cleaned_query, count_joins(cleaned_query)

# def generate_queries(prompt, num_queries=200):
#     # ... 省略其他内容 ...
#     return queries[:num_queries]

# def validate_query(cursor, connection, query):
#     # ... 省略其他内容 ...
#     return valid, explain_output_or_error

# def correct_query_with_llm(failed_query, error_message):
#     # ... 省略其他内容 ...
#     return clean_query(corrected_query)

# def main():
#     connection = connect_to_db(connection_params)
#     if not connection:
#         return

#     cursor = connection.cursor()
    
#     prompt = generate_query_prompt()
    
#     start_time = time.time()
#     queries = generate_queries(prompt)
#     end_time = time.time()

#     generation_time = end_time - start_time
#     avg_generation_time = generation_time / len(queries)

#     total_joins = 0
#     for query, join_count in queries:
#         total_joins += join_count

#     avg_joins = total_joins / len(queries) if queries else 0

#     log_file_path = r"D:\database\queries20.sql"
#     with open(log_file_path, 'w') as f:
#         for idx, (query, join_count) in enumerate(queries):
#             f.write(f"-- Query {idx + 1}\n")
#             f.write(f"-- Join Count: {join_count}\n")
#             f.write(query + ";\n\n")

#     results = []
#     for idx, (query, join_count) in enumerate(queries):
#         valid, explain_output_or_error = validate_query(cursor, connection, query)
        
#         if not valid:
#             print(f"Query {idx + 1} failed, sending to LLM for correction.")
#             corrected_query, corrected_join_count = correct_query_with_llm(query, explain_output_or_error)
#             valid, explain_output_or_error = validate_query(cursor, connection, corrected_query)
            
#             if valid:
#                 validation_result = "Passed after correction"
#                 explain_output = explain_output_or_error
#             else:
#                 validation_result = "Failed after correction"
#                 explain_output = explain_output_or_error
#         else:
#             validation_result = "Passed"
#             explain_output = explain_output_or_error
        
#         results.append({
#             "Query ID": idx + 1,
#             "Generated SQL Query": query,
#             "Corrected SQL Query": corrected_query if not valid else "",
#             "Join Count": join_count,
#             "Generation Time": avg_generation_time,
#             "Validation Result": validation_result,
#             "EXPLAIN Output": explain_output if validation_result.startswith("Passed") else "",
#             "Error Message": explain_output if validation_result.startswith("Failed") else "",
#             "Prompt": prompt,
#         })

#     df = pd.DataFrame(results)
#     excel_file_path = r"C:\Users\Aphro\Desktop\query_log20.xlsx"
#     df.to_excel(excel_file_path, index=False)

#     cursor.close()
#     connection.close()
#     print(f"查询已保存到 {log_file_path}")
#     print(f"查询结果已保存到 {excel_file_path}")
#     print(f"平均 JOIN 数量: {avg_joins}")

# if __name__ == "__main__":
#     main()

import re
import time

import pandas as pd
import psycopg2
from openai import OpenAI

# Initialize the OpenAI-compatible API client.
# NOTE(review): the API key is hardcoded in source — it should be revoked and
# loaded from an environment variable or secrets store instead of committed.
client = OpenAI(
    api_key="sk-FB9VFg4kAJfs4qCDLlQ10XRxJeNVzNIenLDJw8V9YqH1gzRF",
    base_url="https://api.chatanywhere.tech/v1"
)

# Connection parameters for the local IMDB PostgreSQL database.
# NOTE(review): plaintext password in source — move to env/config before sharing.
connection_params = {
    'dbname': 'imdb_database',
    'user': 'postgres',
    'password': '0818',
    'host': 'localhost',
    'port': '5432'
}

def connect_to_db(params):
    """Open a psycopg2 connection from *params*; return it, or None on failure.

    Any exception raised while connecting is reported to stdout and swallowed,
    so callers must check for a None return.
    """
    try:
        conn = psycopg2.connect(**params)
        print("成功连接到数据库")
        return conn
    except Exception as exc:
        print(f"连接数据库失败: {exc}")
        return None

def generate_query_prompt():
    """Return the prompt asking the model for 200 PostgreSQL queries that each
    join at least five tables (the table schema section is elided here)."""
    return """
Generate 200 SQL queries based on the following table structure. 
Ensure all queries are syntactically correct for PostgreSQL, with JOINs on at least five tables and filter conditions on columns.
Do not include sequence numbers, comments, or extraneous characters like quotes or symbols. Only return the SQL queries.

table structure：

... (省略其他表结构)
"""

def count_joins(query):
    """Count JOIN clauses in *query* and estimate how many tables they chain.

    Bug fix: the original scanned line-by-line, but clean_query() collapses
    every query onto a single line, so splitting on newline characters could
    never report more than one JOIN per query. It also matched 'JOIN' as a raw
    substring, so identifiers such as "joined" were miscounted. This version
    counts every JOIN keyword with a case-insensitive word-boundary regex.

    Returns:
        tuple[int, int]: (join_count, tables_count) where tables_count is the
        number of tables a chain of N JOINs involves (N + 1), or 0 when the
        query contains no JOIN at all.
    """
    join_count = len(re.findall(r'\bJOIN\b', query, flags=re.IGNORECASE))
    # A FROM clause with N JOINs references N + 1 tables (when none repeat).
    tables_count = join_count + 1 if join_count else 0
    return join_count, tables_count
def clean_query(query):
    """Normalize a raw SQL fragment and pair it with its JOIN statistics.

    Drops everything after the first '--' comment marker, collapses all runs
    of whitespace to single spaces, and returns
    (normalized_query, count_joins(normalized_query)).
    """
    without_comment = query.strip().split('--', 1)[0]
    normalized = ' '.join(without_comment.split())
    return normalized, count_joins(normalized)

def generate_queries(prompt, num_queries=200):
    """Ask the chat model for SQL and return up to *num_queries* results.

    The raw completion is split on ';', blank fragments are discarded, and
    each remaining fragment is passed through clean_query(), so every element
    of the returned list is a (query_text, join_stats) tuple.
    """
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=8192,
        n=1,
    )
    raw = response.choices[0].message.content.strip()
    cleaned = [clean_query(fragment) for fragment in raw.split(';') if fragment.strip()]
    return cleaned[:num_queries]

def validate_query(cursor, connection, query):
    """Syntax-check *query* by running EXPLAIN against the live database.

    Returns (True, explain_rows) on success; on any failure the transaction is
    rolled back and (False, error_string) is returned so the caller can retry.

    NOTE(review): the query text is interpolated directly into the EXPLAIN
    statement — it is only as trusted as the LLM output that produced it.
    """
    try:
        cursor.execute(f"EXPLAIN {query}")
        plan_rows = cursor.fetchall()
        connection.commit()
        return True, plan_rows
    except Exception as exc:
        connection.rollback()
        return False, str(exc)

def correct_query_with_llm(failed_query, error_message):
    """Send a failed query plus its database error back to the model for repair.

    Returns the model's corrected query run through clean_query(), i.e. a
    (query_text, join_stats) tuple like every other query in the pipeline.
    """
    repair_prompt = (
        "Correct the following SQL query for PostgreSQL based on the error:\n\n"
        f"Query: {failed_query}\nError: {error_message}\n"
    )
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": repair_prompt}],
        max_tokens=8192,
        n=1,
    )
    return clean_query(response.choices[0].message.content.strip())

def main():
    """Generate SQL queries with an LLM, validate each via EXPLAIN against
    PostgreSQL, retry failures once through LLM correction, and write results
    to a .sql log plus an Excel summary.

    Fixes over the previous revision:
      * `corrected_query` was only assigned on the failure branch but was read
        unconditionally when building the results row, raising NameError for
        any query that passed validation on the first try.
      * a corrected query was passed through clean_query() a second time,
        turning the logged "Corrected SQL Query" value into a tuple.
      * average statistics divided by len(queries) without guarding against an
        empty result, raising ZeroDivisionError.
    """
    connection = connect_to_db(connection_params)
    if not connection:
        return

    cursor = connection.cursor()

    prompt = generate_query_prompt()

    start_time = time.time()
    queries = generate_queries(prompt)
    end_time = time.time()

    # Aggregate JOIN statistics; each element is (query_text, (joins, tables)).
    total_joins = 0
    total_tables = 0
    for _query, (joins, tables) in queries:
        total_joins += joins
        total_tables += tables

    # Guard every average against an empty result set.
    num_queries = len(queries)
    avg_generation_time = (end_time - start_time) / num_queries if num_queries else 0
    avg_joins = total_joins / num_queries if num_queries else 0
    avg_tables = total_tables / num_queries if num_queries else 0

    log_file_path = r"D:\database\queries20.sql"
    with open(log_file_path, 'w') as f:
        for idx, (query, join_count) in enumerate(queries):
            f.write(f"-- Query {idx + 1}\n")
            f.write(f"-- Join Count: {join_count[0]}\n")
            f.write(f"-- Tables Count: {join_count[1]}\n")
            f.write(query + ";\n\n")

    results = []
    for idx, (query, join_count) in enumerate(queries):
        # Reset per-iteration so rows for first-try passes record no correction
        # (previously this name was undefined on that path -> NameError).
        corrected_query = ""
        valid, explain_output_or_error = validate_query(cursor, connection, query)

        if not valid:
            # correct_query_with_llm already returns a cleaned
            # (query, (joins, tables)) pair — do not clean it again.
            corrected_query, corrected_join_count = correct_query_with_llm(
                query, explain_output_or_error
            )
            valid, explain_output_or_error = validate_query(
                cursor, connection, corrected_query
            )
            if valid:
                validation_result = "Passed after correction"
                join_count = corrected_join_count
            else:
                validation_result = "Failed after correction"
        else:
            validation_result = "Passed"

        results.append({
            "Query ID": idx + 1,
            "Generated SQL Query": query,
            # Record the corrected text whenever a correction was attempted.
            "Corrected SQL Query": corrected_query,
            "Join Count": join_count[0],
            "Tables Count": join_count[1],
            "Generation Time": avg_generation_time,
            "Validation Result": validation_result,
            "EXPLAIN Output": explain_output_or_error if validation_result.startswith("Passed") else "",
            "Error Message": explain_output_or_error if validation_result.startswith("Failed") else "",
            "Prompt": prompt,
        })

    df = pd.DataFrame(results)
    excel_file_path = r"C:\Users\Aphro\Desktop\query_log20.xlsx"
    df.to_excel(excel_file_path, index=False)

    cursor.close()
    connection.close()
    print(f"查询已保存到 {log_file_path}")
    print(f"查询结果已保存到 {excel_file_path}")
    print(f"平均生成时间: {avg_generation_time} 秒")
    print(f"平均 JOIN 子句数量: {avg_joins}")
    print(f"平均参与 JOIN 操作的表数: {avg_tables}")

# Script entry point: run the full generate -> validate -> log pipeline.
if __name__ == "__main__":
    main()