DEVAI-benchmark committed on
Commit 6822471
1 Parent(s): 478a906

Upload 55 files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. instances/01_Image_Classification_ResNet18_Fashion_MNIST_DL.json +77 -0
  2. instances/02_Maze_Solver_Q_Learning_Gridworld_RL.json +69 -0
  3. instances/03_Text_Classification_NaiveBayes_20Newsgroups_ML.json +71 -0
  4. instances/04_Text_Generation_GPT2_Prompts_DL.json +78 -0
  5. instances/05_Game_Simulation_DQN_CartPole_v1_RL.json +68 -0
  6. instances/06_Sentiment_Analysis_SVM_Sentiment140_ML.json +73 -0
  7. instances/07_Image_Super_Resolution_SRCNN_Set5_DL.json +70 -0
  8. instances/08_Robot_Control_PPO_PyBullet_RL.json +75 -0
  9. instances/09_Recommendation_System_NCF_MovieLens_ML.json +79 -0
  10. instances/10_Face_Recognition_FaceNet_LFW_DL.json +84 -0
  11. instances/11_House_Price_Prediction_LinearRegression_BostonHousing_ML.json +85 -0
  12. instances/12_Spam_Detection_SVM_Enron_ML.json +98 -0
  13. instances/13_Style_Transfer_Perceptual_Loss_CustomImages_DL.json +95 -0
  14. instances/14_Customer_Churn_Prediction_LogisticRegression_Telco_ML.json +94 -0
  15. instances/15_Image_Captioning_ShowAndTell_Flickr8k_DL.json +82 -0
  16. instances/16_Credit_Scoring_DecisionTree_GermanCredit_ML.json +96 -0
  17. instances/17_Heart_Disease_Prediction_XGBoost_UCI_ML.json +113 -0
  18. instances/18_Image_Enhancement_SRGAN_DIV2K_DL.json +83 -0
  19. instances/19_Time_Series_Forecasting_Seq2Seq_LSTM_Rossmann_ML.json +87 -0
  20. instances/20_Car_Price_Prediction_RandomForest_CarPrices_ML.json +95 -0
  21. instances/21_Iris_Classification_SVM_Iris_ML.json +91 -0
  22. instances/22_Sentiment_Analysis_LSTM_IMDb_DL.json +88 -0
  23. instances/23_Wine_Quality_Prediction_DecisionTree_WineQuality_ML.json +97 -0
  24. instances/24_Diabetes_Prediction_LogisticRegression_PimaIndians_ML.json +102 -0
  25. instances/25_Speech_Emotion_Recognition_CNN_LSTM_RAVDESS_DL.json +89 -0
  26. instances/26_Mushroom_Classification_RandomForest_Mushroom_ML.json +94 -0
  27. instances/27_Image_Generation_DCGAN_MNIST_DL.json +94 -0
  28. instances/28_Stock_Price_Prediction_LSTM_YahooFinance_ML.json +82 -0
  29. instances/29_Financial_Time_Series_Prediction_LSTM_ML.json +90 -0
  30. instances/30_Image_Segmentation_UNet_PascalVOC_DL.json +90 -0
  31. instances/31_Cancer_Prediction_SVM_BreastCancer_ML.json +87 -0
  32. instances/32_Weather_Data_Analysis_LinearRegression_Weather_ML.json +88 -0
  33. instances/33_Object_Detection_YOLOv3_COCO_DL.json +93 -0
  34. instances/34_Customer_Segmentation_KMeans_CustomerSegmentation_ML.json +88 -0
  35. instances/35_Loan_Default_Prediction_RandomForest_LendingClub_ML.json +90 -0
  36. instances/36_Music_Emotion_Classification_SVM_GTZAN_ML.json +101 -0
  37. instances/37_Lane_Detection_ResNet50_TuSimple_DL.json +99 -0
  38. instances/38_Object_Tracking_Siamese_OTB50_DL.json +102 -0
  39. instances/39_Drug_Response_Prediction_SVM_GDSC_ML.json +102 -0
  40. instances/40_Text_Summarization_BART_CNNDailyMail_DL.json +90 -0
  41. instances/41_Stock_Classification_KNN_YahooFinance_ML.json +92 -0
  42. instances/42_Medical_Image_Classification_DenseNet121_ChestXray_DL.json +87 -0
  43. instances/43_Social_Network_Analysis_GCN_Cora_ML.json +91 -0
  44. instances/44_Text_Classification_BERT_AGNews_DL.json +83 -0
  45. instances/45_Product_Recommendation_MatrixFactorization_AmazonReviews_ML.json +87 -0
  46. instances/46_Speech_Recognition_DeepSpeech_LibriSpeech_DL.json +90 -0
  47. instances/47_Network_Traffic_Analysis_KMeans_NetworkTraffic_ML.json +90 -0
  48. instances/48_Stock_Trading_Simulation_PPO_HistoricalData_RL.json +95 -0
  49. instances/49_Explainable_AI_LIME_Titanic_ML.json +91 -0
  50. instances/50_Math_Problem_Solving_Transformer_DeepMindMath_DL.json +112 -0
instances/01_Image_Classification_ResNet18_Fashion_MNIST_DL.json ADDED
@@ -0,0 +1,77 @@
+ {
+ "name": "01_Image_Classification_ResNet18_Fashion_MNIST_DL",
+ "query": "Hey! Could you help me set up a system to classify images from the Fashion-MNIST dataset using the ResNet-18 model in PyTorch? The Fashion-MNIST dataset should be loaded in `src/data_loader.py`. I'd like the system to show the training progress with the tqdm library in the training loop in `src/train.py` and to perform some data augmentation with `torchvision.transforms` (like rotation and scaling) to make the model more robust. The latter should be implemented in `src/data_loader.py` and the ResNet-18 model should be imported from PyTorch in `src/model.py`. Once the training is done, please save the trained model as `fashionnet.pt` in the `models/saved_models/` directory. It would be great if the training process could be as efficient as possible. Also, please try to write the code in an easily understandable and easily maintainable style. If you can, it would be awesome to include some insights into model interpretability too, such as by using Grad-CAM or something similar. Thanks a lot!",
+ "tags": [
+ "Classification",
+ "Computer Vision",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Fashion-MNIST\" dataset is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Data augmentation is performed using `torchvision.transforms`, including rotation, scaling, etc. The implementation is in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "The \"ResNet-18\" model is imported from \"PyTorch\" in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Training progress is displayed using the \"tqdm\" library in the main training loop in `src/train.py`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "The trained model is saved under the file name `fashionnet.pt` in `models/saved_models/`.",
+ "category": "Save Trained Model",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "Code should be written in a clear, understandable and maintainable style with appropriate comments.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The training process should be efficient.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 2,
+ "criteria": "Insights of model interpretability should be provided, such as using techniques like Grad-CAM for visualization.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
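Note on the added files: every instance in this commit follows the schema visible above — name, query, tags, a requirements list (requirement_id, prerequisites, criteria, category, satisfied), a preferences list, and three boolean flags. A minimal Python sketch for loading and sanity-checking these files, assuming the `instances/` layout from the file list above; the helper name `load_instance` is illustrative and not part of this commit:

import json
from pathlib import Path

def load_instance(path):
    # Parse one benchmark instance and summarize its structure.
    with open(path, "r", encoding="utf-8") as f:
        instance = json.load(f)
    n_reqs = len(instance["requirements"])
    n_prefs = len(instance.get("preferences", []))
    print(f"{instance['name']}: {n_reqs} requirements, {n_prefs} preferences")
    # Each prerequisite should point at a requirement_id defined in the same instance.
    ids = {r["requirement_id"] for r in instance["requirements"]}
    for r in instance["requirements"]:
        assert set(r["prerequisites"]).issubset(ids), "dangling prerequisite"
    return instance

if __name__ == "__main__":
    for p in sorted(Path("instances").glob("*.json")):
        load_instance(p)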
instances/02_Maze_Solver_Q_Learning_Gridworld_RL.json ADDED
@@ -0,0 +1,69 @@
+ {
+ "name": "02_Maze_Solver_Q_Learning_Gridworld_RL",
+ "query": "Can you help me create a system to solve maze-style Gridworld tasks using the Q-learning algorithm? The system should use numpy to make the core calculations more efficient and matplotlib for visualizations. The Q-learning algorithm should be implemented in `src/train.py`, and the aptly-named Gridworld environment should be implemented in `src/env.py` in such a way that one could specific the grid size and start/end positions when instantiating it. The system needs to record the learning curve during training, tracking episodes and their corresponding returns, and save it as `results/figures/learning_curve.png`. Additionally, I'd like you to visualize and save the paths taken by the agent in each episode in a file called `results/figures/path_changes.gif`, and save the trained model as `models/saved_models/q_learning_model.npy`. It would be great to have some form of real-time feedback during training, like seeing the progress or getting updates on how the model is learning. Also, if you can, please try and write the code in a way that's easy to modify or extend later on.",
+ "tags": [
+ "Reinforcement Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Q-learning\" algorithm is used in `src/train.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [],
+ "criteria": "The \"Gridworld\" environment is defined in `src/env.py` with the ability for a user to specify a grid size and start/end positions.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "Learning curves are recorded during training, and saved as `results/figures/learning_curve.png`. Episodes and returns are recorded.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "The learned model is saved as `models/saved_models/q_learning_model.npy`.",
+ "category": "Save Trained Model",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "Paths taken during learning are visualized and saved as `results/figures/path_changes.gif`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "Some real-time progress or feedback during the training process should be displayed.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The code should be written in a way that's easy to modify or extend later on.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
instances/03_Text_Classification_NaiveBayes_20Newsgroups_ML.json ADDED
@@ -0,0 +1,71 @@
+ {
+ "name": "03_Text_Classification_NaiveBayes_20Newsgroups_ML",
+ "query": "Please implement a Naive Bayes classifier for the 20 Newsgroups dataset and save it in a file called `src/model.py`. The dataset should loaded in `src/data_loader.py`. The program should handle data preprocessing, including removing stop words, punctuation, and special characters. Show the improvement of your classifier by generating word clouds before and after training your classifier and saving them as `results/figures/wordcloud_before.png` and `results/figures/wordcloud_after.png`. Please calculate and include TF-IDF features when loading the data in `src/data_loader.py`. Lastly, print out a performance report (including precision, recall, and F1-score) and save it as `results/metrics/performance.txt`. The model should be straightforward to interpret, and the final report should be structured clearly for easy review.",
+ "tags": [
+ "Classification",
+ "Natural Language Processing",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"20 Newsgroups\" dataset is used in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Data preprocessing is performed, including removing stop words, punctuation, and special characters. Word clouds are visualized before and after training the classifier, and saved as `results/figures/wordcloud_before.png` and `results/figures/wordcloud_after.png`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "\"TF-IDF\" features are used when loading the data in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [],
+ "criteria": "A \"Naive Bayes classifier\" is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "A performance report, including \"precision,\" \"recall,\" and the \"F1-score,\" is printed and saved as `results/metrics/performance.txt`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The model should be straightforward to interpret.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The final report should be structured clearly for easy review.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false,
+ "hint": "In the query, there is a missing word \"be\" after the word \"should\" in \"The dataset should loaded in `src/data_loader.py`\"."
+ }
instances/04_Text_Generation_GPT2_Prompts_DL.json ADDED
@@ -0,0 +1,78 @@
+ {
+ "name": "04_Text_Generation_GPT2_Prompts_DL",
+ "query": "Please build a text generation system by automatically downloading a pre-trained GPT-2 model which you then cache in `models/saved_models/`. The system should receive prompts through loading the current contents of a text file named `data/prompt.txt` which, for demonstration purposes, should contain only the text \"who are you?\" in your implementation. You should automatically correct any typos you run into while reading `data/prompt.txt`. Ensure the generated text is limited to a maximum of 200 characters and your program should save the generated text to `results/generated_text.txt`. Implement post-processing to clean up the generated text by removing extra spaces and correcting common spelling errors. It would be ideal if the system could handle different variations of input prompts efficiently, even with minor typos. Additionally, a user-friendly output format in the saved file (perhaps with timestamps or prompt identifiers) would be nice to have.",
+ "tags": [
+ "Generative Models",
+ "Natural Language Processing"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "A pre-trained \"GPT-2\" model is downloaded and cached in `models/saved_models/`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [],
+ "criteria": "Prompts are read from a text file named `data/prompt.txt` and fed into the model.",
+ "category": "Human Computer Interaction",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "Generation length is limited to a maximum of 200 characters.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Post-processing is implemented to clean up the text, including removing extra spaces and correcting common spelling errors.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "The generated text is saved in `results/generated_text.txt`.",
+ "category": "Other",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The system should handle different input prompts efficiently and correct for minor typos.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The system should operate efficiently, with minimal latency during text generation.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 2,
+ "criteria": "A user-friendly output format in the saved file, perhaps with timestamps or prompt identifiers, should be used.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": false,
+ "is_web_navigation_needed": false,
+ "hint": "There is only one prompt to read. However, requirement 1 says \"Prompts\"."
+ }
instances/05_Game_Simulation_DQN_CartPole_v1_RL.json ADDED
@@ -0,0 +1,68 @@
+ {
+ "name": "05_Game_Simulation_DQN_CartPole_v1_RL",
+ "query": "Create a reliable system to train a DQN agent using PyTorch in an OpenAI Gym environment such as CartPole-v1. Implement DQN using PyTorch in `src/model.py`. The environment should be instantiated in the main file, `src/main.py`. Handle any dimension mismatches between DQN and the environment (like would happen if the Gym environment ID were changed) by logging an error. Also please log the return after each episode to `results/metrics/` and update it after each new episode finishes. Save the final return over episode curve as `results/figures/return_over_time.png` and the final trained model as `models/saved_models/dqn_model.pt`. If possible, add annotations to the return over episode curve to showcase key milestones.",
+ "tags": [
+ "Reinforcement Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"DQN\" algorithm is implemented using PyTorch and saved in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [],
+ "criteria": "An \"OpenAI Gym\" environment is instantiated in `src/main.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "The return over time curve is plotted, recording the return of each episode, and saved as `results/figures/reward_over_time.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "The trained model is saved as `models/saved_models/dqn_model.pt`.",
+ "category": "Save Trained Model",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "The return for each episode is logged to `results/metrics/` and updated after each episode finishes.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The system should handle dimension mismatches, logging the issues for easy debugging.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The return over episode curve has key milestones annotated.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
instances/06_Sentiment_Analysis_SVM_Sentiment140_ML.json ADDED
@@ -0,0 +1,73 @@
+ {
+ "name": "06_Sentiment_Analysis_SVM_Sentiment140_ML",
+ "query": "Please help me build a system for sentiment analysis on tweets using the Sentiment140 dataset available from Hugging Face. Load the Sentiment140 dataset and, when loading the data, clean it by removing stop words, punctuation, and special characters, all in `src/data_loader.py`. Use Word2Vec or GloVe for text vectorization. This should occur in `src/data_loader.py`. Next, implement and train an SVM classifier in `src/model.py`. Finally, write a report of the accuracy of the classifier to `results/metrics/accuracy_score.txt`. Ideally, the report should be easily interpretable.",
+ "tags": [
+ "Natural Language Processing",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Sentiment140\" dataset, available from \"Hugging Face,\" is obtained in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "The dataset is cleaned, including by removing stop words, punctuation, and special characters, all in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "Word embeddings, either \"Word2Vec\" or \"GloVe,\" are used to convert text to vectors in `src/data_loader.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "An \"SVM classifier\" is implemented and trained in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "The accuracy score is printed and saved as `results/metrics/accuracy_score.txt`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The dataset download process should be reliable, with clear error handling.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The final accuracy report should be straightforward and easy to interpret.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
instances/07_Image_Super_Resolution_SRCNN_Set5_DL.json ADDED
@@ -0,0 +1,70 @@
+ {
+ "name": "07_Image_Super_Resolution_SRCNN_Set5_DL",
+ "query": "Hi, I need to create a project for image super-resolution using the SRCNN model with the Set5 dataset (available from `https://huggingface.co/datasets/eugenesiow/Set5`). Load the dataset in `src/data_loader.py`. When loading the data, include image preprocessing steps such as resizing and normalization, all in `src/data_loader.py`. The SRCNN model should be loaded and used in `src/model.py`. Save 5 sets of comparison images, zooming in on details, as `results/figures/super_resolution_compare.png`, and the super-resolution results as `results/figures/super_resolution_results.png`. The generated images should be high-quality and clearly show improvements.",
+ "tags": [
+ "Computer Vision",
+ "Generative Models"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Set5\" dataset (available from \"Hugging Face\") is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Image preprocessing, including resizing and normalization, is performed in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "The \"SRCNN\" model is used in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Five sets of comparison images are saved, with details zoomed in, and saved as `results/figures/super_resolution_compare.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Super-resolution results are saved as `results/figures/super_resolution_results.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The project should generate high-quality, clear super-resolution images with detailed comparisons.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "Well-organized output images, highlighting key improvements, should be included.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": true
+ }
instances/08_Robot_Control_PPO_PyBullet_RL.json ADDED
@@ -0,0 +1,75 @@
+ {
+ "name": "08_Robot_Control_PPO_PyBullet_RL",
+ "query": "I am seeking to implement a project which explores robotic arm control via reinforcement learning in the PyBullet simulation environment with the PPO algorithm. The PyBullet simulator should be imported and a related robotics environment should be loaded in `src/env.py`. The PPO algorithm should be implemented in `src/train.py`. The project should meticulously document the robot's final position, printing and saving it as `data/final_position.txt`. The training return trajectory should be graphed and saved as `results/figures/training_returns.png`. A sample of the robot's motion should be visualized and saved as `results/figures/robot_motion.gif`. A detailed environment setup and reward structure description should be provided in `src/env.py`. Please ensure that any issues with loading URDF files in PyBullet are clearly handled and documented, providing clear error messages or logging for debugging.",
+ "tags": [
+ "Reinforcement Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"PyBullet\" simulator is used in `src/env.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [],
+ "criteria": "The \"PPO\" algorithm is used in `src/train.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "A detailed environment setup and reward structure description is provided in `src/env.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "The robot's final position is printed and saved as `data/final_position.txt`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "The training returns over time curve is recorded and saved as `results/figures/training_returns.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "A sample of the robot's motion is visualized and saved as `results/figures/robot_motion.gif`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The system should effectively handle potential issues with loading URDF files in PyBullet, providing clear error messages or logging for debugging.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
instances/09_Recommendation_System_NCF_MovieLens_ML.json ADDED
@@ -0,0 +1,79 @@
+ {
+ "name": "09_Recommendation_System_NCF_MovieLens_ML",
+ "query": "Help me develop a system to recommend movies based on user ratings from the MovieLens dataset using a Neural Collaborative Filtering (NCF) approach. First, load the dataset and split it into training and testing sets in `src/data_loader.py`. Next, implement the NCF approach and a matrix factorization baseline in `src/model.py`. Using these, print an example of the top 10 recommendations for a test user the NCF approach and the baseline and save them to `results/metrics/top_10_recommendations.txt`. It would be good if these sample recommendations were meaningful given the test user. Evaluate the system's performance using RMSE, MAE, etc., and save the results of this evaluation to `results/metrics/evaluation_metrics.txt`. Try and ensure that there is robust path handling that can deal with missing directories and such when saving files.",
+ "tags": [
+ "Recommender Systems",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Neural Collaborative Filtering (NCF)\" algorithm is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [],
+ "criteria": "The \"MovieLens\" dataset is loaded in 'src/data_loader.py'.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 1
+ ],
+ "criteria": "Data is split into training and testing sets in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [],
+ "criteria": "A matrix factorization baseline is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "The top 10 recommendations for a test user under the \"NCF\" approach and the baseline are saved in `results/metrics/top_10_recommendations.txt`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "The recommendation system performance is evaluated, including with \"RMSE\" and \"MAE,\" and the results are saved as `results/metrics/evaluation_metrics.txt`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "Robust path handling is implemented to deal with things like missing directories.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The top 10 recommendations should be clear and relevant to the sample user's preferences.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
instances/10_Face_Recognition_FaceNet_LFW_DL.json ADDED
@@ -0,0 +1,84 @@
+ {
+ "name": "10_Face_Recognition_FaceNet_LFW_DL",
+ "query": "Help me create a PyTorch face recognition project using the FaceNet model with the LFW dataset. Load the dataset in `src/model.py`. Get the model from Hugging Face (you can find it at https://huggingface.co/py-feat/facenet) and save it in `models/saved_models/`. Ensure the data is preprocessed to ensure the standardization of facial images in `src/data_loader.py`. Use facial embeddings in `src/model.py` to improve the performance of your system. Print the recognition accuracy and save it to `results/metrics/recognition_accuracy.txt`. Next, visualize the embedding results and save them as `results/figures/embedding_visualization.png`. The model should load without issues, ideally with some error handling if something goes wrong. The visualizations should make it easy to see how the embeddings represent distinct facial features.",
+ "tags": [
+ "Computer Vision",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"LFW\" (Labeled Faces in the Wild) dataset is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Data alignment and standardization of facial images is performed in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "The \"FaceNet\" model in \"PyTorch\" is used, loading from [Hugging Face](https://huggingface.co/py-feat/facenet). Save the model in models/saved_models/.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Facial embeddings are used in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "Recognition accuracy is printed and saved as `results/metrics/recognition_accuracy.txt`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "Embedding results are visualized and saved as `results/figures/embedding_visualization.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The model loading process should be smooth, with clear handling of any issues if the model fails to load.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "Embedding visualizations should be clear and effectively highlight distinct facial features.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": false,
+ "is_web_navigation_needed": true,
+ "hint": "The page https://huggingface.co/py-feat/facenet provides guidance on how to use FaceNet; however, Hugging Face does not currently offer a model entry for direct use."
+ }
instances/11_House_Price_Prediction_LinearRegression_BostonHousing_ML.json ADDED
@@ -0,0 +1,85 @@
+ {
+ "name": "11_House_Price_Prediction_LinearRegression_BostonHousing_ML",
+ "query": "Hi. Set up a house price prediction project using a Linear Regression model on the Boston Housing dataset. Load the dataset using `from datasets import load_dataset` and `ds = load_dataset(\"~/mrseba/boston_house_price\")` in `src/data_loader.py`. Ensure feature scaling and data standardization are performed in `src/data_loader.py`. Implement the Linear Regression model in `src/model.py`. Use cross-validation to evaluate the model in `src/train.py`. Print the Mean Squared Error (MSE), Mean Absolute Error (MAE), and $R^2$ score, and save them under `results/metrics/metrics.txt`. Visualize the comparison between predicted and actual values and save the result as `results/figures/`prediction_vs_actual.png`. The visualizations should clearly demonstrate the model's accuracy (which, if done right, should be good).",
+ "tags": [
+ "Financial Analysis",
+ "Regression",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Boston Housing\" dataset is utilized using `from datasets import load_dataset` and `ds = load_dataset(\"mrseba/boston_house_price\")` in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Feature scaling and data standardization are performed in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "The \"Linear Regression\" model is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "\"Cross-validation\" is used to evaluate the model in `src/train.py`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "\"Mean Squared Error (MSE),\" \"Mean Absolute Error (MAE),\" and \"R^2 score\" are printed, and saved as `results/metrics/metrics.txt`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "The comparison of predicted vs. actual values is visualized and saved as `results/figures/prediction_vs_actual.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The dataset should load smoothly using the provided `load_dataset` code, and other methods should be tried if issues arise.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The visualizations should clearly demonstrate the model's accuracy by highlighting the differences between predicted and actual values.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false,
+ "hint": "`ds = load_dataset(\"~/mrseba/boston_house_price\")` in the query is wrong, and it should be `ds = load_dataset(\"mrseba/boston_house_price\")`. We leave it here to check the self-debugging skill of the agents."
+ }
instances/12_Spam_Detection_SVM_Enron_ML.json ADDED
@@ -0,0 +1,98 @@
+ {
+ "name": "12_Spam_Detection_SVM_Enron_ML",
+ "query": "Hello. I need you to build a project to detect spam emails using the Support Vector Machine (SVM) classifier on the Enron-Spam dataset. The project should preprocess the text by removing stop words and punctuation, employ TF-IDF features, perform hyperparameter tuning using GridSearchCV, and save the confusion matrix to `results/figures/confusion_matrix.png`. I also need to write and save a comprehensive report, including precision, recall, F1-score, and the confusion matrix (to be generated as `results/figures/confusion_matrix.png`), under `results/classification_report.pdf`. The Enron-Spam dataset should be loaded in `src/data_loader.py`. Text preprocessing, including removing stop words and punctuation, and calculating TF-IDF features should be performed in `src/data_loader.py`. The SVM classifier should be implemented in `src/model.py`. Hyperparameter tuning should be performed using GridSearchCV in `src/train.py`. It would be helpful if the text preprocessing step is optimized to handle a large number of emails efficiently.",
+ "tags": [
+ "Classification",
+ "Natural Language Processing",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Enron-Spam\" dataset is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Text preprocessing is performed, including removing stop words and punctuation in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "\"TF-IDF\" features are used in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [],
+ "criteria": "The \"SVM classifier\" is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "Hyperparameter tuning is performed using \"GridSearchCV\" in `src/train.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "criteria": "The confusion matrix is saved as `results/figures/confusion_matrix.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 6,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5
+ ],
+ "criteria": "A classification report, including \"precision,\" \"recall,\" \"F1-score,\" and the figure `results/figures/confusion_matrix.png`, is saved as `results/classification_report.pdf`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The text preprocessing step should be optimized to handle a large number of emails efficiently.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The classification report should be comprehensive.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
instances/13_Style_Transfer_Perceptual_Loss_CustomImages_DL.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "name": "13_Style_Transfer_Perceptual_Loss_CustomImages_DL",
+ "query": "Please create a PyTorch Perceptual Loss project for image style transfer (refer to this paper: https://arxiv.org/pdf/1603.08155). You can build the Perceptual Loss Network using VGG16 in `src/model.py`. The project should combine content and style images, allow smooth adjustment of style intensity by tuning the weights of style loss and content loss, and save the stylized images in `results/figures/`. Additionally, log the processing time to `results/processing_time.txt`, and save the intermediate results of the style transfer process to `results/figures/intermediate_results.png`. For testing, input a famous content image (Mona Lisa) from [this link](https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg/768px-Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg) and a famous style image (The Starry Night) from [this link](https://ia904705.us.archive.org/34/items/the-starry-night-vincent-van-gogh/The%20Starry%20Night%20-%20Vincent%20van%20Gogh/%21PREVIEW%21%20-%20The%20Starry%20Night%20-%20Vincent%20van%20Gogh.jpg), and generate a style-transfered image. Save the content, style, and style-transfered images to `data/content.jpg`, `data/style.jpg`, and `results/figures/`, respectively. The project should efficiently handle high-resolution images without excessive processing time.",
+ "tags": [
+ "Computer Vision",
+ "Generative Models"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "A famous content image is inputted for testing, downloaded from [this link](https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg/768px-Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg) and saved to `data/content.jpg`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [],
+ "criteria": "A famous style image is inputted for testing, downloaded from [this link](https://ia904705.us.archive.org/34/items/the-starry-night-vincent-van-gogh/The%20Starry%20Night%20-%20Vincent%20van%20Gogh/%21PREVIEW%21%20-%20The%20Starry%20Night%20-%20Vincent%20van%20Gogh.jpg) and saved in `data/style.jpg`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "The Perceptual loss model implemented in \"PyTorch\" is loaded in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Stylized images are saved to the specified folder `results/figures/`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Style intensity is adjusted by tuning the weights of style loss and content loss in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "criteria": "Processing time is recorded and saved as `results/processing_time.txt`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 6,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "criteria": "Intermediate results of style transfer are saved as `results/figures/intermediate_results.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The style transfer process should allow for smooth adjustment of style intensity, making the stylized image visually appealing.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The project should handle high-resolution images efficiently without excessive processing time.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": false,
+ "is_web_navigation_needed": false,
+ "hint": "VGG16 was not originally designed for style transfer. However, the user's query states, 'Please create a PyTorch project for image style transfer using a pre-trained VGG16 model.' Ideally, a well-informed agent should create or find a model for style transfer networks that incorporate pre-trained VGG16, rather than simply loading the VGG16 model."
+ }
instances/14_Customer_Churn_Prediction_LogisticRegression_Telco_ML.json ADDED
@@ -0,0 +1,94 @@
+ {
+ "name": "14_Customer_Churn_Prediction_LogisticRegression_Telco_ML",
+ "query": "Help me develop a system to predict customer churn using the Telco Customer Churn dataset, potentially being downloaded from [this link](https://huggingface.co/datasets/scikit-learn/churn-prediction). Load the dataset in `src/data_loader.py`. The project should include feature engineering, such as feature selection and scaling, and handle imbalanced data using oversampling or undersampling techniques implemented in `src/data_loader.py`. The exact details of this are left for you to decide. Implement a Logistic Regression model in `src/model.py` and perform cross-validation while training the model in `src/train.py`. Finally, print and save the classification report (including precision, recall, and F1-score) to `results/metrics/classification_report.txt`, and save a ROC curve to `results/figures/roc_curve.png`. Ensure the dataset loads smoothly with appropriate error handling. The feature engineering should thoroughly select the most relevant features.",
+ "tags": [
+ "Classification",
+ "Supervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Telco Customer Churn\" dataset is used, potentially being downloaded from [this link](https://huggingface.co/datasets/scikit-learn/churn-prediction). Load the dataset in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Feature engineering, including feature selection and scaling, is implemented in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Imbalanced data is handled using oversampling or undersampling techniques in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [],
+ "criteria": "The \"Logistic Regression\" model is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "Cross-validation is used to evaluate the model in `src/train.py`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "criteria": "A classification report, including \"precision,\" \"recall,\" and \"F1-score,\" is saved as `results/metrics/classification_report.txt`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 6,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "criteria": "A \"ROC curve\" is saved as `results/figures/roc_curve.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The dataset should load smoothly, with proper error handling if issues arise during download.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The feature engineering process should be thorough, ensuring that the most relevant features are selected for the model.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": true
+ }
instances/15_Image_Captioning_ShowAndTell_Flickr8k_DL.json ADDED
@@ -0,0 +1,82 @@
+ {
+ "name": "15_Image_Captioning_ShowAndTell_Flickr8k_DL",
+ "query": "This is my current PyTorch project: Develop an automatic image captioning system using the Show and Tell model. Here I found a repo can guide you: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Image-Captioning. Use the dataset Flickr8k dataset, downloading it from [this link](https://huggingface.co/datasets/jxie/flickr8k) and load it in `src/data_loader.py`. The system should generate descriptions of sample images and save them to `results/metrics/generated_descriptions.txt`. An attention mechanism must be implemented in `src/model.py`. Save the pre-trained model as `models/saved_models/show_and_tell_model.pt`. Visualize the attention weights and save it to `results/figures/attention_weights.png`. The dataset should load smoothly, with proper error handling if any issues arise. Ideally, the attention mechanism should clearly highlight the image regions that are most influential in generating captions.",
+ "tags": [
+ "Computer Vision",
+ "Natural Language Processing"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The pre-trained \"Show and Tell\" model is used.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [],
+ "criteria": "The \"Flickr8k\" dataset, potentially downloaded from [this link](https://huggingface.co/datasets/jxie/flickr8k), is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "The attention mechanism is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Generated descriptions of sample images are saved in `results/metrics/generated_descriptions.txt`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "The trained model is saved as `models/saved_models/show_and_tell_model.pt`.",
+ "category": "Save Trained Model",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "A visualization of attention weights is saved as `results/figures/attention_weights.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The dataset should load smoothly, with clear error handling if any issues arise during download.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The attention mechanism should clearly highlight the image regions that contribute most to the generated captions.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": true
+ }
instances/16_Credit_Scoring_DecisionTree_GermanCredit_ML.json ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "16_Credit_Scoring_DecisionTree_GermanCredit_ML",
3
+ "query": "Help me develop a system to predict credit scores using the German Credit dataset, which can be downloaded from [this link](https://archive.ics.uci.edu/dataset/144/statlog+german+credit+data). Load the dataset and preprocess it, including handling missing values and feature encoding, in `src/data_loader.py`. Use a Decision Tree classifier implemented in `src/model.py` with cross-validation to evaluate the model in `src/train.py`. Visualize feature importances in `results/figures/feature_importances.png`. Generate a classification report, including precision, recall, and F1-score, and save it to `results/metrics/classification_report.txt`. Create a Markdown report with results and visualizations and save it in `results/report.md`. The dataset should load smoothly with proper error handling, and the Markdown report should be well-organized for easy review.",
4
+ "tags": [
5
+ "Classification",
6
+ "Supervised Learning"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "Load the \"German Credit\" dataset, potentially downloading it from [this link](https://archive.ics.uci.edu/dataset/144/statlog+german+credit+data) in the `src/data_loader.py` file.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Data preprocessing is performed in `src/data_loader.py`, including handling missing values and feature encoding.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [],
28
+ "criteria": "A \"Decision Tree\" classifier is implemented in `src/model.py`.",
29
+ "category": "Machine Learning Method",
30
+ "satisfied": null
31
+ },
32
+ {
33
+ "requirement_id": 3,
34
+ "prerequisites": [
35
+ 1,
36
+ 2
37
+ ],
38
+ "criteria": "Cross-validation is used to evaluate the model in `src/train.py`.",
39
+ "category": "Performance Metrics",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 0,
46
+ 1,
47
+ 2,
48
+ 3
49
+ ],
50
+ "criteria": "Feature importances are visualized in `results/figures/feature_importances.png`.",
51
+ "category": "Visualization",
52
+ "satisfied": null
53
+ },
54
+ {
55
+ "requirement_id": 5,
56
+ "prerequisites": [
57
+ 0,
58
+ 1,
59
+ 2,
60
+ 3
61
+ ],
62
+ "criteria": "A classification report, including \"precision,\" \"recall,\" and \"F1-score,\" is generated and saved as `results/metrics/classification_report.txt`.",
63
+ "category": "Performance Metrics",
64
+ "satisfied": null
65
+ },
66
+ {
67
+ "requirement_id": 6,
68
+ "prerequisites": [
69
+ 0,
70
+ 1,
71
+ 2,
72
+ 3,
73
+ 4,
74
+ 5
75
+ ],
76
+ "criteria": "A Markdown file containing results and visualizations is generated and saved in `results/report.md`.",
77
+ "category": "Visualization",
78
+ "satisfied": null
79
+ }
80
+ ],
81
+ "preferences": [
82
+ {
83
+ "preference_id": 0,
84
+ "criteria": "The dataset should load smoothly, with clear error handling if any issues arise during download.",
85
+ "satisfied": null
86
+ },
87
+ {
88
+ "preference_id": 1,
89
+ "criteria": "The Markdown report should be well-organized, making it easy to review all the results and visualizations.",
90
+ "satisfied": null
91
+ }
92
+ ],
93
+ "is_kaggle_api_needed": false,
94
+ "is_training_needed": true,
95
+ "is_web_navigation_needed": true
96
+ }
instances/17_Heart_Disease_Prediction_XGBoost_UCI_ML.json ADDED
@@ -0,0 +1,113 @@
1
+ {
2
+ "name": "17_Heart_Disease_Prediction_XGBoost_UCI_ML",
3
+ "query": "Create a project to predict heart disease using an XGBoost model with the UCI Heart Disease dataset, which can be downloaded from [this link](https://archive.ics.uci.edu/dataset/45/heart+disease). Load the dataset in `src/data_loader.py`. Implement feature selection and data standardization in `src/data_loader.py`. Use SHAP values to explain the feature importance, and save the results as `results/figures/shap_importance.png`. Implement the XGBoost model in `src/model.py`. Then, use SHAP values to explain the feature importance, and save the results as `results/shap_importance.png`. Save the ROC curve to `results/figures/roc_curve.png`. Finally, generate an HTML report containing all the results and visualizations, and save it as `results/report.html`. Ensure the SHAP visualizations clearly highlight the most impactful features. Include a performance comparison with another model, such as Logistic Regression, to validate the robustness of the XGBoost model. Save the XGBoost model under `models/saved_models/`.",
4
+ "tags": [
5
+ "Classification",
6
+ "Medical Analysis",
7
+ "Supervised Learning"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"UCI Heart Disease\" dataset is used, potentially being downloaded from [this link](https://archive.ics.uci.edu/dataset/45/heart+disease). Load the dataset in `src/data_loader.py`.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Feature selection is implemented in `src/data_loader.py`.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [
29
+ 0
30
+ ],
31
+ "criteria": "Data standardization which ensures feature values are within the same range is implemented in `src/data_loader.py`.",
32
+ "category": "Data preprocessing and postprocessing",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [],
38
+ "criteria": "The \"XGBoost\" model is implemented in `src/model.py`.",
39
+ "category": "Machine Learning Method",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 0,
46
+ 1,
47
+ 2,
48
+ 3
49
+ ],
50
+ "criteria": "\"SHAP\" values are used for feature importance explanation, with results saved as `results/figures/shap_importance.png`.",
51
+ "category": "Visualization",
52
+ "satisfied": null
53
+ },
54
+ {
55
+ "requirement_id": 5,
56
+ "prerequisites": [
57
+ 0,
58
+ 1,
59
+ 2,
60
+ 3
61
+ ],
62
+ "criteria": "The ROC curve saved as `results/figures/roc_curve.png`.",
63
+ "category": "Visualization",
64
+ "satisfied": null
65
+ },
66
+ {
67
+ "requirement_id": 6,
68
+ "prerequisites": [
69
+ 0,
70
+ 1,
71
+ 2,
72
+ 3,
73
+ 4,
74
+ 5
75
+ ],
76
+ "criteria": "An HTML report containing results and visualizations is generated, saved as `results/report.html`.",
77
+ "category": "Visualization",
78
+ "satisfied": null
79
+ },
80
+ {
81
+ "requirement_id": 7,
82
+ "prerequisites": [
83
+ 1,
84
+ 2,
85
+ 3
86
+ ],
87
+ "criteria": "A performance comparison with another model (e.g., Logistic Regression) is included to validate the robustness of the XGBoost model.",
88
+ "category": "Other",
89
+ "satisfied": null
90
+ },
91
+ {
92
+ "requirement_id": 8,
93
+ "prerequisites": [
94
+ 1,
95
+ 2,
96
+ 3
97
+ ],
98
+ "criteria": "A XGBoost model is saved under `models/saved_models/`.",
99
+ "category": "Save Trained Model",
100
+ "satisfied": null
101
+ }
102
+ ],
103
+ "preferences": [
104
+ {
105
+ "preference_id": 0,
106
+ "criteria": "The SHAP visualizations should be clear and highlight the most impactful features, making the results easy to interpret.",
107
+ "satisfied": null
108
+ }
109
+ ],
110
+ "is_kaggle_api_needed": false,
111
+ "is_training_needed": true,
112
+ "is_web_navigation_needed": true
113
+ }
instances/18_Image_Enhancement_SRGAN_DIV2K_DL.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "name": "18_Image_Enhancement_SRGAN_DIV2K_DL",
3
+ "query": "I need to create a system for image enhancement using an SRGAN model (you can obtain a pre-trained SRGAN [here](https://github.com/tensorlayer/srgan)) with the DIV2K dataset, which can be downloaded from [this link](https://data.vision.ee.ethz.ch/cvl/DIV2K/). The dataset should be loaded in the `src/data_loader.py` file. The system should preprocess the images, including resizing and normalization, in `src/data_loader.py`. Use a pre-trained model saved under `models/saved_models/` to save time, and save the enhanced images to the `results/figures/` directory. Additionally, the system should visualize and save the comparison between the original and enhanced images to `results/figures/enhanced_comparison.png`. Finally, create a Markdown report with results and visualizations on a diverse set of samples to showcase the model's performance on various types of images, and save it as `results/report.md`. The report should include a detailed comparison of the model's performance on these selected samples, highlighting where the model excels or struggles.",
4
+ "tags": [
5
+ "Computer Vision",
6
+ "Generative Models"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"DIV2K\" dataset is loaded in the `src/data_loader.py` file.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [],
19
+ "criteria": "A pre-trained \"SRGAN\" model is saved under `models/saved_models/`.",
20
+ "category": "Save Trained Model",
21
+ "satisfied": null
22
+ },
23
+ {
24
+ "requirement_id": 2,
25
+ "prerequisites": [
26
+ 0
27
+ ],
28
+ "criteria": "Image preprocessing, including resizing and normalization, is implemented in `src/data_loader.py`.",
29
+ "category": "Data preprocessing and postprocessing",
30
+ "satisfied": null
31
+ },
32
+ {
33
+ "requirement_id": 3,
34
+ "prerequisites": [
35
+ 0,
36
+ 1,
37
+ 2
38
+ ],
39
+ "criteria": "Enhanced images are saved to the specified folder `results/figures/`.",
40
+ "category": "Data preprocessing and postprocessing",
41
+ "satisfied": null
42
+ },
43
+ {
44
+ "requirement_id": 4,
45
+ "prerequisites": [
46
+ 0,
47
+ 1,
48
+ 2,
49
+ 3
50
+ ],
51
+ "criteria": "The comparison of original and enhanced images is visualized and saved as `results/figures/enhanced_comparison.png`.",
52
+ "category": "Visualization",
53
+ "satisfied": null
54
+ },
55
+ {
56
+ "requirement_id": 5,
57
+ "prerequisites": [
58
+ 1,
59
+ 2,
60
+ 3,
61
+ 4
62
+ ],
63
+ "criteria": "A Markdown file containing results and visualizations is generated and saved as `results/report.md`.",
64
+ "category": "Visualization",
65
+ "satisfied": null
66
+ }
67
+ ],
68
+ "preferences": [
69
+ {
70
+ "preference_id": 0,
71
+ "criteria": "A diverse set of samples should be selected to showcase the model's performance across different types of images.",
72
+ "satisfied": null
73
+ },
74
+ {
75
+ "preference_id": 1,
76
+ "criteria": "The Markdown report should include a detailed comparison of the model's performance on these selected samples, highlighting where the model excels or struggles.",
77
+ "satisfied": null
78
+ }
79
+ ],
80
+ "is_kaggle_api_needed": false,
81
+ "is_training_needed": false,
82
+ "is_web_navigation_needed": true
83
+ }
instances/19_Time_Series_Forecasting_Seq2Seq_LSTM_Rossmann_ML.json ADDED
@@ -0,0 +1,87 @@
1
+ {
2
+ "name": "19_Time_Series_Forecasting_Seq2Seq_LSTM_Rossmann_ML",
3
+ "query": "Develop a sales forecasting system using a sequence-to-sequence model based on LSTM with the Rossmann Store Sales dataset, downloading it from Kaggle [here](https://www.kaggle.com/c/rossmann-store-sales/data) and loading it in `src/data_loader.py`. Split the data into training and testing sets and save them in `src/data_loader.py`. Apply a sequence-to-sequence model based on `LSTM` and save the trained model under the `models/saved_models/` directory. Save the forecast results as `results/figures/forecast_results.png`. Save a comparison plot between the predicted and actual values to `results/figures/comparison_plot.png`. Generate an HTML report that includes the prediction results and comparison plots, with some interactive elements for exploring different forecast horizons, and save it as `results/report.html`. Ensure the model is tuned to capture seasonal trends in the sales data.",
4
+ "tags": [
5
+ "Supervised Learning",
6
+ "Time Series Forecasting"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"Rossmann Store Sales\" dataset is used, potentially downloaded from Kaggle [this link](https://www.kaggle.com/c/rossmann-store-sales/data) and loaded in `src/data_loader.py`.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "The data is split into training and testing sets and implemented in `src/data_loader.py`.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 1
29
+ ],
30
+ "criteria": "A sequence-to-sequence model based on \"LSTM\" is used. Please save the trained model under the `models/saved_models/` directory.",
31
+ "category": "Machine Learning Method",
32
+ "satisfied": null
33
+ },
34
+ {
35
+ "requirement_id": 3,
36
+ "prerequisites": [
37
+ 1,
38
+ 2
39
+ ],
40
+ "criteria": "The forecast results are plotted and saved as `results/figures/forecast_results.png`.",
41
+ "category": "Visualization",
42
+ "satisfied": null
43
+ },
44
+ {
45
+ "requirement_id": 4,
46
+ "prerequisites": [
47
+ 1,
48
+ 2,
49
+ 3
50
+ ],
51
+ "criteria": "A comparison plot of predicted vs. actual values is saved as `results/figures/comparison_plot.png`.",
52
+ "category": "Visualization",
53
+ "satisfied": null
54
+ },
55
+ {
56
+ "requirement_id": 5,
57
+ "prerequisites": [
58
+ 1,
59
+ 2,
60
+ 3,
61
+ 4
62
+ ],
63
+ "criteria": "An HTML report containing forecast results and comparison plots is generated and saved as `results/report.html`.",
64
+ "category": "Visualization",
65
+ "satisfied": null
66
+ },
67
+ {
68
+ "requirement_id": 6,
69
+ "prerequisites": [
70
+ 5
71
+ ],
72
+ "criteria": "The HTML report should include interactive elements that allow users to explore different forecast horizons.",
73
+ "category": "Human Computer Interaction",
74
+ "satisfied": null
75
+ }
76
+ ],
77
+ "preferences": [
78
+ {
79
+ "preference_id": 0,
80
+ "criteria": "The model should be tuned to capture seasonal trends in the sales data for more accurate forecasting.",
81
+ "satisfied": null
82
+ }
83
+ ],
84
+ "is_kaggle_api_needed": true,
85
+ "is_training_needed": true,
86
+ "is_web_navigation_needed": true
87
+ }
instances/20_Car_Price_Prediction_RandomForest_CarPrices_ML.json ADDED
@@ -0,0 +1,95 @@
1
+ {
2
+ "name": "20_Car_Price_Prediction_RandomForest_CarPrices_ML",
3
+ "query": "Can you help me create a car price prediction project using a Random Forest model with the Kaggle Car Prices dataset? Load the dataset and perform feature selection to identify important features in `src/data_loader.py`. Use cross-validation to evaluate the model in `src/train.py`. Save the R-squared score, Mean Squared Error (MSE), and Mean Absolute Error (MAE) to `results/metrics/results/metrics.txt`. Visualize the feature importance and save it to `results/figures/feature_importance.png`. Generate a Markdown report with insights into how the selected features contribute to the car price predictions. Saving the report as `results/report.md`.",
4
+ "tags": [
5
+ "Financial Analysis",
6
+ "Regression",
7
+ "Supervised Learning"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"Kaggle Car Prices\" dataset is loaded in `src/data_loader.py`.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Feature selection is implemented to identify important features in `src/data_loader.py`.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [],
29
+ "criteria": "The \"Random Forest\" regression model is used in `src/model.py`.",
30
+ "category": "Machine Learning Method",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [
36
+ 0,
37
+ 1,
38
+ 2
39
+ ],
40
+ "criteria": "Cross-validation is used to evaluate the model in `src/train.py`.",
41
+ "category": "Performance Metrics",
42
+ "satisfied": null
43
+ },
44
+ {
45
+ "requirement_id": 4,
46
+ "prerequisites": [
47
+ 1,
48
+ 2,
49
+ 3
50
+ ],
51
+ "criteria": "The \"R-squared\" score, \"Mean Squared Error (MSE),\" and \"Mean Absolute Error (MAE)\" are saved in `results/metrics/results/metrics.txt`.",
52
+ "category": "Performance Metrics",
53
+ "satisfied": null
54
+ },
55
+ {
56
+ "requirement_id": 5,
57
+ "prerequisites": [
58
+ 1,
59
+ 2,
60
+ 3
61
+ ],
62
+ "criteria": "Feature importances are visualized and saved as `results/figures/feature_importance.png`.",
63
+ "category": "Visualization",
64
+ "satisfied": null
65
+ },
66
+ {
67
+ "requirement_id": 6,
68
+ "prerequisites": [
69
+ 1,
70
+ 2,
71
+ 3,
72
+ 4,
73
+ 5
74
+ ],
75
+ "criteria": "A Markdown file containing results and visualizations is generated and saved as `results/report.md`.",
76
+ "category": "Visualization",
77
+ "satisfied": null
78
+ }
79
+ ],
80
+ "preferences": [
81
+ {
82
+ "preference_id": 0,
83
+ "criteria": "The feature selection process should be thorough, ensuring that only the most relevant features are used in the model.",
84
+ "satisfied": null
85
+ },
86
+ {
87
+ "preference_id": 1,
88
+ "criteria": "The Markdown report should provide clear insights into how the selected features contribute to the car price predictions.",
89
+ "satisfied": null
90
+ }
91
+ ],
92
+ "is_kaggle_api_needed": true,
93
+ "is_training_needed": true,
94
+ "is_web_navigation_needed": false
95
+ }
instances/21_Iris_Classification_SVM_Iris_ML.json ADDED
@@ -0,0 +1,91 @@
1
+ {
2
+ "name": "21_Iris_Classification_SVM_Iris_ML",
3
+ "query": "I request a project to classify iris species utilizing the Iris dataset with a Support Vector Machine (SVM) classifier implemented in `src/model.py`. The project should standardize the data in and perform feature selection in `src/data_loader.py`. It will document the classification accuracy and save it as `results/metrics/classification_accuracy.txt`, and generate and save a confusion matrix as `results/figures/confusion_matrix.png`. It will further create an interactive web application in `src/app.py` using Streamlit to showcase classification results and model performance, with the figures stored in `results/figures/`. The web page should be user-friendly, with a brief explanation of the model to help users understand how the SVM classifier works.",
4
+ "tags": [
5
+ "Classification",
6
+ "Supervised Learning"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"Iris\" dataset is used.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Data is standardized to ensure feature values are within the same range in `src/data_loader.py`.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 0
29
+ ],
30
+ "criteria": "Feature selection is performed to identify important features in `src/data_loader.py`.",
31
+ "category": "Data preprocessing and postprocessing",
32
+ "satisfied": null
33
+ },
34
+ {
35
+ "requirement_id": 3,
36
+ "prerequisites": [],
37
+ "criteria": "The \"SVM classifier\" is implemented in `src/model.py`.",
38
+ "category": "Machine Learning Method",
39
+ "satisfied": null
40
+ },
41
+ {
42
+ "requirement_id": 4,
43
+ "prerequisites": [
44
+ 1,
45
+ 2,
46
+ 3
47
+ ],
48
+ "criteria": "Classification accuracy is saved in `results/metrics/classification_accuracy.txt`.",
49
+ "category": "Performance Metrics",
50
+ "satisfied": null
51
+ },
52
+ {
53
+ "requirement_id": 5,
54
+ "prerequisites": [
55
+ 1,
56
+ 2,
57
+ 3
58
+ ],
59
+ "criteria": "A confusion matrix is generated and saved as `results/figures/confusion_matrix.png`.",
60
+ "category": "Visualization",
61
+ "satisfied": null
62
+ },
63
+ {
64
+ "requirement_id": 6,
65
+ "prerequisites": [
66
+ 2,
67
+ 3,
68
+ 4,
69
+ 5
70
+ ],
71
+ "criteria": "An interactive web application `src/app.py` is created using \"Streamlit\"` to showcase classification results and model performance in results/figures/.",
72
+ "category": "Human Computer Interaction",
73
+ "satisfied": null
74
+ }
75
+ ],
76
+ "preferences": [
77
+ {
78
+ "preference_id": 0,
79
+ "criteria": "The Streamlit web page should be user-friendly, allowing users to easily explore different aspects of the model's performance.",
80
+ "satisfied": null
81
+ },
82
+ {
83
+ "preference_id": 1,
84
+ "criteria": "A brief model explanation should be included on the web page, helping users understand how the SVM classifier works.",
85
+ "satisfied": null
86
+ }
87
+ ],
88
+ "is_kaggle_api_needed": false,
89
+ "is_training_needed": true,
90
+ "is_web_navigation_needed": false
91
+ }
instances/22_Sentiment_Analysis_LSTM_IMDb_DL.json ADDED
@@ -0,0 +1,88 @@
1
+ {
2
+ "name": "22_Sentiment_Analysis_LSTM_IMDb_DL",
3
+ "query": "Could you help me set up a sentiment analysis project using an LSTM model and the IMDb dataset? Please implement data cleaning in `src/data_loader.py`, including the removal of stop words and punctuation. Use word embeddings to convert the text to a numerical format and save these embeddings under `models/saved_models/`. Then use these embeddings as input of an LSTM model, which should be implemented in `src/model.py`. Save the classification report to `results/metrics/classification_report.txt`. Create a Jupyter Notebook saved as `results/report.ipynb` with the model architecture and training process visualized. Also, save the training loss and accuracy curves to `results/figures/training_curves.png`. Pre-trained embeddings (e.g., Word2Vec or GloVe) are preferred to enhance model performance.",
4
+ "tags": [
5
+ "Natural Language Processing",
6
+ "Supervised Learning"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"IMDb\" movie reviews dataset is used.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Data cleaning is implemented in `src/data_loader.py`, including the removal of stop words and punctuation.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 0,
29
+ 1
30
+ ],
31
+ "criteria": "Word embeddings are used to convert text to numerical format and saved under `models/saved_models/`.",
32
+ "category": "Data preprocessing and postprocessing",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [],
38
+ "criteria": "An \"LSTM\" model is used for sentiment analysis and should be implemented in `src/model.py`.",
39
+ "category": "Machine Learning Method",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 2,
46
+ 3
47
+ ],
48
+ "criteria": "A classification report is saved as `results/metrics/classification_report.txt`.",
49
+ "category": "Performance Metrics",
50
+ "satisfied": null
51
+ },
52
+ {
53
+ "requirement_id": 5,
54
+ "prerequisites": [
55
+ 2,
56
+ 3
57
+ ],
58
+ "criteria": "A Jupyter Notebook containing the model architecture and training process visualization is generated and saved as `results/report.ipynb`.",
59
+ "category": "Visualization",
60
+ "satisfied": null
61
+ },
62
+ {
63
+ "requirement_id": 6,
64
+ "prerequisites": [
65
+ 2,
66
+ 3
67
+ ],
68
+ "criteria": "Training loss and accuracy curves are generated and saved as `results/figures/training_curves.png`.",
69
+ "category": "Visualization",
70
+ "satisfied": null
71
+ }
72
+ ],
73
+ "preferences": [
74
+ {
75
+ "preference_id": 0,
76
+ "criteria": "The word embeddings should be pre-trained (e.g., Word2Vec or GloVe) to leverage existing semantic knowledge.",
77
+ "satisfied": null
78
+ },
79
+ {
80
+ "preference_id": 1,
81
+ "criteria": "The Jupyter Notebook should be well-documented, making it easy for others to understand the model architecture and training process.",
82
+ "satisfied": null
83
+ }
84
+ ],
85
+ "is_kaggle_api_needed": false,
86
+ "is_training_needed": true,
87
+ "is_web_navigation_needed": false
88
+ }
instances/23_Wine_Quality_Prediction_DecisionTree_WineQuality_ML.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "name": "23_Wine_Quality_Prediction_DecisionTree_WineQuality_ML",
3
+ "query": "Build a wine quality prediction system using a Decision Tree model with the Wine Quality dataset from UCI. Preprocess the data in `src/data_loader.py`, including handling missing values and feature scaling. Use cross-validation to evaluate the model in `src/train.py`. Implement the Decision Tree regression model in `src/model.py`.Save the mean squared error in `results/metrics/mean_squared_error.txt`. Visualize and save feature importance as `results/figures/feature_importance.png`. Create a Jupyter Notebook with results and visualizations, and summarize your observations. The Notebook should thoroughly document the preprocessing steps to ensure reproducibility. Convert the Notebook to a PDF report and save it as `results/report.pdf`. The PDF report should also include a brief discussion on potential improvements of the model.",
4
+ "tags": [
5
+ "Classification",
6
+ "Supervised Learning"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"Wine Quality\" dataset from \"UCI\" is used.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Data preprocessing is performed in `src/data_loader.py`, including handling missing values and feature scaling.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [],
28
+ "criteria": "The \"Decision Tree\" regression model is implemented in `src/model.py`.",
29
+ "category": "Machine Learning Method",
30
+ "satisfied": null
31
+ },
32
+ {
33
+ "requirement_id": 3,
34
+ "prerequisites": [
35
+ 0,
36
+ 1,
37
+ 2
38
+ ],
39
+ "criteria": "Cross-validation is used to evaluate the model in `src/train.py`.",
40
+ "category": "Performance Metrics",
41
+ "satisfied": null
42
+ },
43
+ {
44
+ "requirement_id": 4,
45
+ "prerequisites": [
46
+ 0,
47
+ 1,
48
+ 2,
49
+ 3
50
+ ],
51
+ "criteria": "The Mean Squared Error (MSE) is saved in `results/metrics/mean_squared_error.txt`.",
52
+ "category": "Performance Metrics",
53
+ "satisfied": null
54
+ },
55
+ {
56
+ "requirement_id": 5,
57
+ "prerequisites": [
58
+ 0,
59
+ 1,
60
+ 2,
61
+ 3
62
+ ],
63
+ "criteria": "The feature importance plot is generated and saved as `results/figures/feature_importance.png`.",
64
+ "category": "Visualization",
65
+ "satisfied": null
66
+ },
67
+ {
68
+ "requirement_id": 6,
69
+ "prerequisites": [
70
+ 0,
71
+ 1,
72
+ 2,
73
+ 3,
74
+ 4,
75
+ 5
76
+ ],
77
+ "criteria": "A Jupyter Notebook containing preprocessing steps, results and visualizations is generated with observations summarized. The Notebook is converted to a PDF report and saved as `results/report.pdf`.",
78
+ "category": "Visualization",
79
+ "satisfied": null
80
+ }
81
+ ],
82
+ "preferences": [
83
+ {
84
+ "preference_id": 0,
85
+ "criteria": "The feature importance plot should clearly highlight the top influential features.",
86
+ "satisfied": null
87
+ },
88
+ {
89
+ "preference_id": 1,
90
+ "criteria": "The final PDF report should include a brief discussion on potential improvements of the model.",
91
+ "satisfied": null
92
+ }
93
+ ],
94
+ "is_kaggle_api_needed": false,
95
+ "is_training_needed": true,
96
+ "is_web_navigation_needed": false
97
+ }
instances/24_Diabetes_Prediction_LogisticRegression_PimaIndians_ML.json ADDED
@@ -0,0 +1,102 @@
1
+ {
2
+ "name": "24_Diabetes_Prediction_LogisticRegression_PimaIndians_ML",
3
+ "query": "Set up a diabetes prediction project using a Logistic Regression model and the Pima Indians Diabetes dataset. Perform feature scaling and data standardization in `src/data_loader.py`. Use cross-validation to evaluate the model in `src/train.py`, and save the accuracy score to `results/metrics/accuracy_score.txt`. Generate and save the ROC curve to `results/figures/roc_curve.png`. Create an interactive dashboard using Tableau or Power BI to showcase the model's performance and highlight important features. Ensure the dashboard is user-friendly and document the dataset processing and visualization creation steps. During development, the system should automatically manage the opening and closing of Tableau or Power BI to prevent unnecessary blocking.",
4
+ "tags": [
5
+ "Classification",
6
+ "Medical Analysis",
7
+ "Supervised Learning"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"Pima Indians Diabetes\" dataset is used.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Feature scaling and data standardization are implemented in `src/data_loader.py`.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [
29
+ 1
30
+ ],
31
+ "criteria": "A \"Logistic Regression\" model is implemented in `src/model.py`.",
32
+ "category": "Machine Learning Method",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [
38
+ 0,
39
+ 1,
40
+ 2
41
+ ],
42
+ "criteria": "Cross-validation is used to evaluate the model in `src/train.py`.",
43
+ "category": "Performance Metrics",
44
+ "satisfied": null
45
+ },
46
+ {
47
+ "requirement_id": 4,
48
+ "prerequisites": [
49
+ 1,
50
+ 2,
51
+ 3
52
+ ],
53
+ "criteria": "The accuracy score is saved in `results/metrics/accuracy_score.txt`.",
54
+ "category": "Performance Metrics",
55
+ "satisfied": null
56
+ },
57
+ {
58
+ "requirement_id": 5,
59
+ "prerequisites": [
60
+ 1,
61
+ 2,
62
+ 3
63
+ ],
64
+ "criteria": "The ROC curve is generated and saved as `results/figures/roc_curve.png`.",
65
+ "category": "Visualization",
66
+ "satisfied": null
67
+ },
68
+ {
69
+ "requirement_id": 6,
70
+ "prerequisites": [
71
+ 1,
72
+ 2,
73
+ 3,
74
+ 4,
75
+ 5
76
+ ],
77
+ "criteria": "An interactive visualization dashboard using \"Tableau\" or \"Power BI\" is created to showcase model performance and important features. ",
78
+ "category": "Visualization",
79
+ "satisfied": null
80
+ }
81
+ ],
82
+ "preferences": [
83
+ {
84
+ "preference_id": 0,
85
+ "criteria": "The dashboard should allow users to explore different aspects of the model's performance and understand which features contribute most to predictions.",
86
+ "satisfied": null
87
+ },
88
+ {
89
+ "preference_id": 1,
90
+ "criteria": "The dashboard should clearly show how the dataset was processed and how the visualizations were created.",
91
+ "satisfied": null
92
+ },
93
+ {
94
+ "preference_id": 2,
95
+ "criteria": "During development, the system should automatically open and close \"Tableau\" or \"Power BI\" as needed to avoid long periods of blocking or inactivity.",
96
+ "satisfied": null
97
+ }
98
+ ],
99
+ "is_kaggle_api_needed": false,
100
+ "is_training_needed": true,
101
+ "is_web_navigation_needed": false
102
+ }
instances/25_Speech_Emotion_Recognition_CNN_LSTM_RAVDESS_DL.json ADDED
@@ -0,0 +1,89 @@
1
+ {
2
+ "name": "25_Speech_Emotion_Recognition_CNN_LSTM_RAVDESS_DL",
3
+ "query": "I am seeking a speech emotion recognition project using a CNN-LSTM model with the RAVDESS dataset, which should be downloaded from Kaggle or [this Hugging Face link](https://huggingface.co/datasets/xbgoose/ravdess). The project should load the dataset and perform robust audio preprocessing (noise removal and normalization) and MFCC feature extraction, implemented in `src/data_loader.py`. The CNN-LSTM model should be implemented in `src/model.py`. Recognition accuracy should be saved in `results/metrics/recognition_accuracy.txt`, and a confusion matrix should be generated and saved as `results/figures/confusion_matrix.png`. Additionally, a user-friendly local API should be created using Flask to allow users to upload audio files and receive emotion recognition results, with the implementation included in `src/hci.py`.",
4
+ "tags": [
5
+ "Audio Processing",
6
+ "Classification"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"RAVDESS\" dataset is loaded in `src/data_loader.py`, which is downloaded from Kaggle or [this Hugging Face link](https://huggingface.co/datasets/xbgoose/ravdess).",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Audio preprocessing, including noise removal and normalization, is implemented in `src/data_loader.py`.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 0,
29
+ 1
30
+ ],
31
+ "criteria": "MFCC feature extraction is implemented in `src/data_loader.py`.",
32
+ "category": "Data preprocessing and postprocessing",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [],
38
+ "criteria": "The \"CNN-LSTM\" model is implemented in `src/model.py`.",
39
+ "category": "Machine Learning Method",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 2,
46
+ 3
47
+ ],
48
+ "criteria": "Recognition accuracy is saved in `results/metrics/recognition_accuracy.txt`.",
49
+ "category": "Performance Metrics",
50
+ "satisfied": null
51
+ },
52
+ {
53
+ "requirement_id": 5,
54
+ "prerequisites": [
55
+ 2,
56
+ 3,
57
+ 4
58
+ ],
59
+ "criteria": "The confusion matrix is generated and saved as `results/figures/confusion_matrix.png`.",
60
+ "category": "Visualization",
61
+ "satisfied": null
62
+ },
63
+ {
64
+ "requirement_id": 6,
65
+ "prerequisites": [
66
+ 2,
67
+ 3
68
+ ],
69
+ "criteria": "A local API is created using \"Flask\" to allow users to upload audio files and receive emotion recognition results. The implementation should be included in `src/hci.py`.",
70
+ "category": "Human Computer Interaction",
71
+ "satisfied": null
72
+ }
73
+ ],
74
+ "preferences": [
75
+ {
76
+ "preference_id": 0,
77
+ "criteria": "The audio preprocessing step should be robust, effectively reducing noise while preserving the integrity of the speech signals.",
78
+ "satisfied": null
79
+ },
80
+ {
81
+ "preference_id": 1,
82
+ "criteria": "The local API should be user-friendly, with clear instructions for uploading files and interpreting results.",
83
+ "satisfied": null
84
+ }
85
+ ],
86
+ "is_kaggle_api_needed": true,
87
+ "is_training_needed": true,
88
+ "is_web_navigation_needed": true
89
+ }
instances/26_Mushroom_Classification_RandomForest_Mushroom_ML.json ADDED
@@ -0,0 +1,94 @@
1
+ {
2
+ "name": "26_Mushroom_Classification_RandomForest_Mushroom_ML",
3
+ "query": "Develop a mushroom classification system using a Random Forest model on the UCI Mushroom dataset. Load the dataset in the `src/data_loader.py` file. Ensure that feature engineering, including feature encoding and feature selection, and missing data handling are completed in `src/data_loader.py` before training the model. Train the Random Forest classifier on the processed dataset in `src/train.py`. Save the classification results `results/classification_results.txt`. Visualize and save the feature importance as `results/figures/feature_importance.png`, ensuring the visualization clearly highlights the most influential features. Create an interactive web page in `src/app.py` using Streamlit to showcase the classification results and model performance. The Streamlit web page should provide an overview of the model's performance and allow users to interact with the classification results. The system should manages the start and end of the Streamlit visualization properly.",
4
+ "tags": [
5
+ "Classification",
6
+ "Supervised Learning"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"UCI Mushroom\" dataset is loaded in the `src/data_loader.py` file.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Feature engineering is performed, including feature encoding and feature selection in `src/data_loader.py`.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 0,
29
+ 1
30
+ ],
31
+ "criteria": "Missing data is handled to ensure the dataset is clean before training in `src/data_loader.py`.",
32
+ "category": "Data preprocessing and postprocessing",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [],
38
+ "criteria": "A Random Forest classifier is trained on the processed dataset in `src/train.py`.",
39
+ "category": "Machine Learning Method",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 2,
46
+ 3
47
+ ],
48
+ "criteria": "The classification results are saved in `results/classification_results.txt`.",
49
+ "category": "Performance Metrics",
50
+ "satisfied": null
51
+ },
52
+ {
53
+ "requirement_id": 5,
54
+ "prerequisites": [
55
+ 2,
56
+ 3
57
+ ],
58
+ "criteria": "Feature importance is visualized and saved as `results/figures/feature_importance.png`.",
59
+ "category": "Visualization",
60
+ "satisfied": null
61
+ },
62
+ {
63
+ "requirement_id": 6,
64
+ "prerequisites": [
65
+ 2,
66
+ 3,
67
+ 4
68
+ ],
69
+ "criteria": "An interactive web page is created in `src/app.py` using \"Streamlit\" to showcase classification results and model performance.",
70
+ "category": "Human Computer Interaction",
71
+ "satisfied": null
72
+ }
73
+ ],
74
+ "preferences": [
75
+ {
76
+ "preference_id": 0,
77
+ "criteria": "The feature importance visualization should clearly highlight the most influential features, making it easy to interpret.",
78
+ "satisfied": null
79
+ },
80
+ {
81
+ "preference_id": 1,
82
+ "criteria": "The Streamlit web page should provide an overview of the model's performance and allow users to interact with the classification results.",
83
+ "satisfied": null
84
+ },
85
+ {
86
+ "preference_id": 2,
87
+ "criteria": "The system properly manages the start and end of the Streamlit visualization .",
88
+ "satisfied": null
89
+ }
90
+ ],
91
+ "is_kaggle_api_needed": false,
92
+ "is_training_needed": true,
93
+ "is_web_navigation_needed": false
94
+ }
instances/27_Image_Generation_DCGAN_MNIST_DL.json ADDED
@@ -0,0 +1,94 @@
1
+ {
2
+ "name": "27_Image_Generation_DCGAN_MNIST_DL",
3
+ "query": "I need to create a system for image generation using a DCGAN model with the MNIST`dataset. Load the MNIST dataset in `src/data_loader.py` and implement the DCGAN model in `src/model.py`. The system should ensure the use of the correct DCGAN architecture, save the generated images to `results/figures/`, monitor the model training by recording training loss under `results/metrics/` and generated images under `results/figures/`, and perform a hyperparameter search on the generation parameters such as noise vector dimensions and learning rate in `src/train.py` to improve performance. Additionally, create and save a GIF animation of the generated images to `results/figures/generated_images.gif`, present the training process and results in a well-structured Jupyter Notebook, and convert the Notebook into a polished PDF report saved as `results/training_report.pdf`. The DCGAN model architecture should be clearly documented in the Notebook to avoid confusion with other GAN variants.",
4
+ "tags": [
5
+ "Computer Vision",
6
+ "Generative Models"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"MNIST\" dataset is loaded in `src/data_loader.py`.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [],
19
+ "criteria": "The \"DCGAN\" model, not a standard GAN, is implemented in `src/model.py`.",
20
+ "category": "Machine Learning Method",
21
+ "satisfied": null
22
+ },
23
+ {
24
+ "requirement_id": 2,
25
+ "prerequisites": [
26
+ 0,
27
+ 1
28
+ ],
29
+ "criteria": "Generated images are saved to the specified folder `results/figures/`.",
30
+ "category": "Save Trained Model",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [
36
+ 0,
37
+ 1
38
+ ],
39
+ "criteria": "The model training is monitored by recording training loss saved under `results/metrics/`",
40
+ "category": "Performance Metrics",
41
+ "satisfied": null
42
+ },
43
+ {
44
+ "requirement_id": 4,
45
+ "prerequisites": [
46
+ 0,
47
+ 1
48
+ ],
49
+ "criteria": "A hyperparemeter search method to search parameters such as noise vector dimensions and learning rate is implemented in `src/train.py` to improve model performance.",
50
+ "category": "Machine Learning Method",
51
+ "satisfied": null
52
+ },
53
+ {
54
+ "requirement_id": 5,
55
+ "prerequisites": [
56
+ 1,
57
+ 2,
58
+ 3,
59
+ 4
60
+ ],
61
+ "criteria": "A GIF animation of generated images is created and saved as `results/figures/generated_images.gif`.",
62
+ "category": "Visualization",
63
+ "satisfied": null
64
+ },
65
+ {
66
+ "requirement_id": 6,
67
+ "prerequisites": [
68
+ 1,
69
+ 2,
70
+ 3,
71
+ 4
72
+ ],
73
+ "criteria": "The training process and results are presented in a Jupyter Notebook, and converted to a PDF report, and saved as `results/training_report.pdf`.",
74
+ "category": "Visualization",
75
+ "satisfied": null
76
+ }
77
+ ],
78
+ "preferences": [
79
+ {
80
+ "preference_id": 0,
81
+ "criteria": "The DCGAN model architecture should be clearly documented in the Notebook to avoid confusion with other GAN variants.",
82
+ "satisfied": null
83
+ },
84
+ {
85
+ "preference_id": 1,
86
+ "criteria": "The PDF report should be well-structured, with clear sections for model architecture, training process, results, and future improvements.",
87
+ "satisfied": null
88
+ }
89
+ ],
90
+ "is_kaggle_api_needed": false,
91
+ "is_training_needed": true,
92
+ "is_web_navigation_needed": false,
93
+ "hint": "Saving figures is mentioned twice, i.e., once in requirement 2 and once in requirement 3."
94
+ }
instances/28_Stock_Price_Prediction_LSTM_YahooFinance_ML.json ADDED
@@ -0,0 +1,82 @@
1
+ {
2
+ "name": "28_Stock_Price_Prediction_LSTM_YahooFinance_ML",
3
+ "query": "Could you help me build a stock price prediction system using an LSTM model and the Yahoo Finance dataset? Please clean the data, including handling missing values and outliers, and use a time window to convert the time series data to a supervised learning problem. The LSTM model should be implemented in `src/model.py`, and the dataset loading, cleaning, and conversion should be implemented in `src/data_loader.py`. Save the prediction results to `results/predictions.txt` and generate and save interactive charts of the prediction results in `results/figures/prediction_interactive.html` using Plotly. Create a Jupyter Notebook with model architecture visualization, training process, and prediction results and save it as a PDF report at `results/report.pdf`.",
4
+ "tags": [
5
+ "Financial Analysis",
6
+ "Supervised Learning",
7
+ "Time Series Forecasting"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"LSTM\" model is implemented in `src/model.py`.",
14
+ "category": "Machine Learning Method",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [],
20
+ "criteria": "The \"Yahoo Finance\" dataset is loaded in `src/data_loader.py`.",
21
+ "category": "Dataset or Environment",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [
27
+ 1
28
+ ],
29
+ "criteria": "Data cleaning, including handling missing values and outliers, is performed in `src/data_loader.py`.",
30
+ "category": "Data preprocessing and postprocessing",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [
36
+ 0,
37
+ 2
38
+ ],
39
+ "criteria": "A time window is used to convert the time series data to a supervised learning problem. Please save the implementation in `src/data_loader.py`.",
40
+ "category": "Data preprocessing and postprocessing",
41
+ "satisfied": null
42
+ },
43
+ {
44
+ "requirement_id": 4,
45
+ "prerequisites": [
46
+ 2,
47
+ 3
48
+ ],
49
+ "criteria": "Prediction results are saved in `results/predictions.txt`.",
50
+ "category": "Other",
51
+ "satisfied": null
52
+ },
53
+ {
54
+ "requirement_id": 5,
55
+ "prerequisites": [
56
+ 0,
57
+ 1,
58
+ 2
59
+ ],
60
+ "criteria": "Interactive charts of prediction results are generated using \"Plotly\" and saved in `results/figures/prediction_interactive.html`.",
61
+ "category": "Visualization",
62
+ "satisfied": null
63
+ },
64
+ {
65
+ "requirement_id": 6,
66
+ "prerequisites": [
67
+ 0,
68
+ 1,
69
+ 2,
70
+ 3,
71
+ 4
72
+ ],
73
+ "criteria": "A Jupyter Notebook containing the model architecture visualization, training process, and prediction results are created and saved as PDF report as `results/report.pdf`.",
74
+ "category": "Other",
75
+ "satisfied": null
76
+ }
77
+ ],
78
+ "preferences": [],
79
+ "is_kaggle_api_needed": false,
80
+ "is_training_needed": true,
81
+ "is_web_navigation_needed": false
82
+ }
instances/29_Financial_Time_Series_Prediction_LSTM_ML.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "name": "29_Financial_Time_Series_Prediction_LSTM_ML",
3
+ "query": "Could you help me set up a financial time series prediction system using an LSTM model with some real-world Financial Analysis, like stock prices or Bitcoin prices? First, we'll need to clean the data, taking care of any missing values and outliers in `src/data_loader.py`. Then, let's convert the time series data into a supervised learning format using a time window in `src/data_loader.py`. Finally apply a LSTM model for prediction, where the LSTM model is implemented in `src/model.py`. Once you've got the predictions, save the results as `results/prediction_results.text`. Create an interactive dashboard visualizing prediction results using Dash and save the implementation in `src/dashboard.py`. Finally, I'd appreciate a Markdown document that shows the model architecture, training process, and performance analysis, saved as `results/report.md`. Make sure the system manages the start and stop of the Dash app automatically to save resources. Thanks so much!",
4
+ "tags": [
5
+ "Financial Analysis",
6
+ "Supervised Learning",
7
+ "Time Series Forecasting"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "Some real-world financial time series data (e.g., \"stock prices\" or \"Bitcoin prices\") is loaded in `src/data_loader.py`.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Data cleaning is performed, including handling missing values and outliers in `src/data_loader.py`.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [
29
+ 1
30
+ ],
31
+ "criteria": "A time window is used to convert the time series data into a supervised learning problem. Please implement this in `src/data_loader.py`.",
32
+ "category": "Data preprocessing and postprocessing",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [],
38
+ "criteria": "An \"LSTM\" model is used for financial time series prediction and implemented in `src/model.py`.",
39
+ "category": "Machine Learning Method",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 2,
46
+ 3
47
+ ],
48
+ "criteria": "Prediction results saved as `results/prediction_results.txt`.",
49
+ "category": "Other",
50
+ "satisfied": null
51
+ },
52
+ {
53
+ "requirement_id": 5,
54
+ "prerequisites": [
55
+ 2,
56
+ 3
57
+ ],
58
+ "criteria": "An interactive visualization dashboard of prediction results is created using \"Dash\". The implementation is saved in `src/visualize.py`.",
59
+ "category": "Visualization",
60
+ "satisfied": null
61
+ },
62
+ {
63
+ "requirement_id": 6,
64
+ "prerequisites": [
65
+ 2,
66
+ 3,
67
+ 4,
68
+ 5
69
+ ],
70
+ "criteria": "A Markdown document containing the model architecture, training process, and performance analysis is generated, and saved as `results/report.md`.",
71
+ "category": "Other",
72
+ "satisfied": null
73
+ }
74
+ ],
75
+ "preferences": [
76
+ {
77
+ "preference_id": 0,
78
+ "criteria": "The \"Dash\" dashboard should allow users to interact with the prediction results, enabling exploration of different time frames and zooming into specific periods for detailed analysis.",
79
+ "satisfied": null
80
+ },
81
+ {
82
+ "preference_id": 1,
83
+ "criteria": "During development, the system should automatically manage the start and stop of the \"Dash\" application to prevent unnecessary resource usage.",
84
+ "satisfied": null
85
+ }
86
+ ],
87
+ "is_kaggle_api_needed": false,
88
+ "is_training_needed": true,
89
+ "is_web_navigation_needed": false
90
+ }
instances/30_Image_Segmentation_UNet_PascalVOC_DL.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "name": "30_Image_Segmentation_UNet_PascalVOC_DL",
3
+ "query": "Could you help me set up an image segmentation project using the Pascal VOC dataset and a pre-trained U-Net model implemented in PyTorch? There is no need for additional training. Apply data augmentation (e.g., flipping and rotating images), use the Dice coefficient for evaluation, save the segmented images to `results/figures/`, generate and save a GIF animation showing images before and after the segmentation to `results/figures/segmentation_results.gif`, and create a well-documented Jupyter Notebook with the model architecture, process, and segmentation results, converting it to an HTML report (saving it as `results/report.html`).",
4
+ "tags": [
5
+ "Computer Vision"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "The \"Pascal VOC\" dataset is used in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Data augmentation, including flipping and rotating images, is performed in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [],
27
+ "criteria": "A pre-trained \"U-Net\" model from PyTorch is used without additional training and saved in `models/saved_models/`.",
28
+ "category": "Machine Learning Method",
29
+ "satisfied": null
30
+ },
31
+ {
32
+ "requirement_id": 3,
33
+ "prerequisites": [
34
+ 1,
35
+ 2
36
+ ],
37
+ "criteria": "The \"Dice coefficient\" is used for evaluation and should be saved in `results/metrics/`.",
38
+ "category": "Performance Metrics",
39
+ "satisfied": null
40
+ },
41
+ {
42
+ "requirement_id": 4,
43
+ "prerequisites": [
44
+ 1,
45
+ 2
46
+ ],
47
+ "criteria": "Segmented images are saved to the specified folder `results/figures/`.",
48
+ "category": "Data preprocessing and postprocessing",
49
+ "satisfied": null
50
+ },
51
+ {
52
+ "requirement_id": 5,
53
+ "prerequisites": [
54
+ 1,
55
+ 2,
56
+ 4
57
+ ],
58
+ "criteria": "A GIF animation of images before and after the segmentation is generated and saved as `results/figures/segmentation_results.gif`.",
59
+ "category": "Visualization",
60
+ "satisfied": null
61
+ },
62
+ {
63
+ "requirement_id": 6,
64
+ "prerequisites": [
65
+ 1,
66
+ 2,
67
+ 3,
68
+ 4
69
+ ],
70
+ "criteria": "A Jupyter Notebook is created containing the model architecture, the process of applying the pre-trained model, and segmentation results. It is converted to an HTML report and saved as `results/report.html`.",
71
+ "category": "Visualization",
72
+ "satisfied": null
73
+ }
74
+ ],
75
+ "preferences": [
76
+ {
77
+ "preference_id": 0,
78
+ "criteria": "The Jupyter Notebook should include well-documented code snippets explaining each step of the process.",
79
+ "satisfied": null
80
+ },
81
+ {
82
+ "preference_id": 1,
83
+ "criteria": "The GIF animation should clearly show the changes before and after segmentation over different images from the dataset.",
84
+ "satisfied": null
85
+ }
86
+ ],
87
+ "is_kaggle_api_needed": false,
88
+ "is_training_needed": false,
89
+ "is_web_navigation_needed": false
90
+ }
instances/31_Cancer_Prediction_SVM_BreastCancer_ML.json ADDED
@@ -0,0 +1,87 @@
1
+ {
2
+ "name": "31_Cancer_Prediction_SVM_BreastCancer_ML",
3
+ "query": "Could you help me create a project for breast cancer prediction using an SVM model with the Breast Cancer Wisconsin dataset? Load the dataset and perform feature selection to identify important features in `src/data_loader.py`. Implement the SVM classifier for cancer prediction in `src/model.py`. Use cross-validation to evaluate the model in `src/train.py`. Save the confusion matrix as `results/figures/confusion_matrix.png`. Put together a detailed report that documents the entire process-from data preprocessing to model training and evaluation. The report should cover the feature selection process and include a clear heatmap of the performance metrics. Save the report as `results/metrics/breast_cancer_prediction_report.pdf`.",
4
+ "tags": [
5
+ "Classification",
6
+ "Medical Analysis",
7
+ "Supervised Learning"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"Breast Cancer Wisconsin\" dataset is used.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Feature selection is performed to identify important features in `src/data_loader.py`.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [],
29
+ "criteria": "The \"SVM classifier\" is used for cancer prediction and should be implemented in `src/model.py`.",
30
+ "category": "Machine Learning Method",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [
36
+ 1,
37
+ 2
38
+ ],
39
+ "criteria": "Cross-validation is used to evaluate the model in `src/train.py`.",
40
+ "category": "Performance Metrics",
41
+ "satisfied": null
42
+ },
43
+ {
44
+ "requirement_id": 4,
45
+ "prerequisites": [
46
+ 1,
47
+ 2,
48
+ 3
49
+ ],
50
+ "criteria": "The confusion matrix is printed and saved as `results/figures/confusion_matrix.png`.",
51
+ "category": "Visualization",
52
+ "satisfied": null
53
+ },
54
+ {
55
+ "requirement_id": 5,
56
+ "prerequisites": [
57
+ 1,
58
+ 2,
59
+ 3,
60
+ 4
61
+ ],
62
+ "criteria": "A detailed report containing the data preprocessing, model training, and evaluation process is created and saved as `results/metrics/breast_cancer_prediction_report.pdf`.",
63
+ "category": "Other",
64
+ "satisfied": null
65
+ }
66
+ ],
67
+ "preferences": [
68
+ {
69
+ "preference_id": 0,
70
+ "criteria": "The feature selection process should be well-documented in the report, explaining why certain features were chosen.",
71
+ "satisfied": null
72
+ },
73
+ {
74
+ "preference_id": 1,
75
+ "criteria": "The heatmap should clearly distinguish between different performance metrics, such as precision, recall, and F1-score.",
76
+ "satisfied": null
77
+ },
78
+ {
79
+ "preference_id": 2,
80
+ "criteria": "The report should include a discussion on the model's performance and potential areas for improvement.",
81
+ "satisfied": null
82
+ }
83
+ ],
84
+ "is_kaggle_api_needed": false,
85
+ "is_training_needed": true,
86
+ "is_web_navigation_needed": false
87
+ }
instances/32_Weather_Data_Analysis_LinearRegression_Weather_ML.json ADDED
@@ -0,0 +1,88 @@
1
+ {
2
+ "name": "32_Weather_Data_Analysis_LinearRegression_Weather_ML",
3
+ "query": "Develop a weather data analysis system using a Linear Regression model on the Weather dataset from Kaggle. Load the dataset and perform feature engineering, including feature selection and generation and handle missing data using mean imputation or interpolation in `src/data_loader.py`. Then, apply the Linear Regression model should be implemented in `src/model.py`. Visualize and save the correlation matrix in `results/figures/correlation_matrix.png` and the prediction results as a line plot with confidence intervals in `results/figures/prediction_results.png`. Finally, create a detailed report covering data preprocessing, feature engineering, model training, and prediction results. Save the report in `results/weather_analysis_report.pdf`. The feature engineering process should be well-documented.",
4
+ "tags": [
5
+ "Regression",
6
+ "Supervised Learning"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"Kaggle Weather\" dataset is loaded in `src/data_loader.py`.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Feature engineering, including feature selection and generation, is performed in `src/data_loader.py`.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 1
29
+ ],
30
+ "criteria": "Missing data is handled using mean imputation or interpolation in `src/data_loader.py`.",
31
+ "category": "Data preprocessing and postprocessing",
32
+ "satisfied": null
33
+ },
34
+ {
35
+ "requirement_id": 3,
36
+ "prerequisites": [],
37
+ "criteria": "The \"Linear Regression\" model is used for weather data analysis and should be implemented in `src/model.py`.",
38
+ "category": "Machine Learning Method",
39
+ "satisfied": null
40
+ },
41
+ {
42
+ "requirement_id": 4,
43
+ "prerequisites": [
44
+ 2,
45
+ 3
46
+ ],
47
+ "criteria": "The correlation matrix is saved as `results/figures/correlation_matrix.png`.",
48
+ "category": "Visualization",
49
+ "satisfied": null
50
+ },
51
+ {
52
+ "requirement_id": 5,
53
+ "prerequisites": [
54
+ 2,
55
+ 3
56
+ ],
57
+ "criteria": "Prediction results are plotted and saved as a line plot with confidence intervals. The plot is saved as `results/figures/prediction_results.png`.",
58
+ "category": "Visualization",
59
+ "satisfied": null
60
+ },
61
+ {
62
+ "requirement_id": 6,
63
+ "prerequisites": [
64
+ 2,
65
+ 3,
66
+ 5
67
+ ],
68
+ "criteria": "A detailed report containing data preprocessing, feature engineering, model training, and prediction results is created and saved as `results/weather_analysis_report.pdf`.",
69
+ "category": "Other",
70
+ "satisfied": null
71
+ }
72
+ ],
73
+ "preferences": [
74
+ {
75
+ "preference_id": 0,
76
+ "criteria": "The feature engineering process should be clearly documented in the report, explaining the rationale behind feature selection and generation.",
77
+ "satisfied": null
78
+ },
79
+ {
80
+ "preference_id": 1,
81
+ "criteria": "The report should include a discussion on the correlation matrix, highlighting any interesting relationships between features.",
82
+ "satisfied": null
83
+ }
84
+ ],
85
+ "is_kaggle_api_needed": true,
86
+ "is_training_needed": true,
87
+ "is_web_navigation_needed": false
88
+ }
instances/33_Object_Detection_YOLOv3_COCO_DL.json ADDED
@@ -0,0 +1,93 @@
1
+ {
2
+ "name": "33_Object_Detection_YOLOv3_COCO_DL",
3
+ "query": "Help me develop an object detection system using the YOLOv3 model and the COCO dataset. Download the dataset and preprocess the images by resizing and normalization in `src/data_loader.py`. Implement the YOLOv3 model and use Non-Maximum Suppression (NMS) to refine the results in `src/model.py`. Save the detected objects to `results/figures/`, and create an interactive Streamlit web page in `src/app.py` to display the detection results. Finally, evaluate the model's performance, including metrics such as mAP and inference time, and save the evaluation results to `results/metrics/model_performance.txt`. The system should properly manage the launch and termination of the Streamlit application to prevent unnecessary resource usage.",
4
+ "tags": [
5
+ "Computer Vision"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "The \"COCO\" dataset downloading is implemented in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Data preprocessing, including resizing and normalization of images, is performed in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [],
27
+ "criteria": "The \"YOLOv3\" model is implemented in `src/model.py`.",
28
+ "category": "Machine Learning Method",
29
+ "satisfied": null
30
+ },
31
+ {
32
+ "requirement_id": 3,
33
+ "prerequisites": [
34
+ 1,
35
+ 2
36
+ ],
37
+ "criteria": "\"Non-Maximum Suppression\" (NMS) is applied to refine detection results. Please implement this in `src/model.py`.",
38
+ "category": "Data preprocessing and postprocessing",
39
+ "satisfied": null
40
+ },
41
+ {
42
+ "requirement_id": 4,
43
+ "prerequisites": [
44
+ 2,
45
+ 3
46
+ ],
47
+ "criteria": "Detection results are saved to the specified folder `results/figures/`.",
48
+ "category": "Visualization",
49
+ "satisfied": null
50
+ },
51
+ {
52
+ "requirement_id": 5,
53
+ "prerequisites": [
54
+ 2,
55
+ 3,
56
+ 4
57
+ ],
58
+ "criteria": "An interactive web page in `src/app.py` using \"Streamlit\" is created to display detection results saved in `results/figures/`.",
59
+ "category": "Human Computer Interaction",
60
+ "satisfied": null
61
+ },
62
+ {
63
+ "requirement_id": 6,
64
+ "prerequisites": [
65
+ 2,
66
+ 3
67
+ ],
68
+ "criteria": "Model performance evaluation results are saved in `results/metrics/model_performance.txt`.",
69
+ "category": "Performance Metrics",
70
+ "satisfied": null
71
+ }
72
+ ],
73
+ "preferences": [
74
+ {
75
+ "preference_id": 0,
76
+ "criteria": "The \"Streamlit\" web page should be user-friendly, allowing users to easily upload and view new images for detection.",
77
+ "satisfied": null
78
+ },
79
+ {
80
+ "preference_id": 1,
81
+ "criteria": "The performence evalution includes mAP and inference time as metrics.",
82
+ "satisfied": null
83
+ },
84
+ {
85
+ "preference_id": 2,
86
+ "criteria": " The system should properly manage the launch and termination of the Streamlit application.",
87
+ "satisfied": null
88
+ }
89
+ ],
90
+ "is_kaggle_api_needed": false,
91
+ "is_training_needed": true,
92
+ "is_web_navigation_needed": false
93
+ }
instances/34_Customer_Segmentation_KMeans_CustomerSegmentation_ML.json ADDED
@@ -0,0 +1,88 @@
1
+ {
2
+ "name": "34_Customer_Segmentation_KMeans_CustomerSegmentation_ML",
3
+ "query": "I need to create a customer segmentation system using the K-means clustering algorithm with the Kaggle Customer Segmentation dataset. Start by standardizing the data in `src/data_loader.py`, then use the elbow method to determine the optimal number of clusters and save the elbow plot to `results/figures/elbow.jpg`. Implement the K-means algorithm in `src/model.py`. Save the cluster centers in `results/metrics/cluster_centers.txt`. Visualize the segmentation results using seaborn and save the plot as `results/figures/customer_segmentation.png`. Create an interactive Dash dashboard allowing dynamic exploration of the segments.",
4
+ "tags": [
5
+ "Unsupervised Learning"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "The \"Kaggle Customer Segmentation\" dataset is used, including data loading and preparation in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Data is standardized to ensure feature values are within the same range in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [
27
+ 1
28
+ ],
29
+ "criteria": "The elbow method is used to determine the optimal number of clusters. Please save the elbow plot to `results/figures/elbow.jpg`.",
30
+ "category": "Machine Learning Method",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [],
36
+ "criteria": "The K-means clustering algorithm is implemented in `src/model.py`.",
37
+ "category": "Machine Learning Method",
38
+ "satisfied": null
39
+ },
40
+ {
41
+ "requirement_id": 4,
42
+ "prerequisites": [
43
+ 2,
44
+ 3
45
+ ],
46
+ "criteria": "Cluster centers are saved in `results/metrics/cluster_centers.txt`.",
47
+ "category": "Save Trained Model",
48
+ "satisfied": null
49
+ },
50
+ {
51
+ "requirement_id": 5,
52
+ "prerequisites": [
53
+ 2,
54
+ 3,
55
+ 4
56
+ ],
57
+ "criteria": "The Customer segmentation is visualized using \"seaborn,\" with the plot saved as `results/figures/customer_segmentation.png`.",
58
+ "category": "Visualization",
59
+ "satisfied": null
60
+ },
61
+ {
62
+ "requirement_id": 6,
63
+ "prerequisites": [
64
+ 2,
65
+ 3,
66
+ 4
67
+ ],
68
+ "criteria": "An interactive dashboard which allows dynamic exploration of the segments is created using \"Dash\".",
69
+ "category": "Human Computer Interaction",
70
+ "satisfied": null
71
+ }
72
+ ],
73
+ "preferences": [
74
+ {
75
+ "preference_id": 0,
76
+ "criteria": "The elbow plot clearly shows how the optimal number of clusters is determined.",
77
+ "satisfied": null
78
+ },
79
+ {
80
+ "preference_id": 1,
81
+ "criteria": " The system properly manages the launch and termination of the dashboard.",
82
+ "satisfied": null
83
+ }
84
+ ],
85
+ "is_kaggle_api_needed": true,
86
+ "is_training_needed": true,
87
+ "is_web_navigation_needed": false
88
+ }
instances/35_Loan_Default_Prediction_RandomForest_LendingClub_ML.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "name": "35_Loan_Default_Prediction_RandomForest_LendingClub_ML",
3
+ "query": "Can you help me build a loan default prediction system using a Random Forest classifier with the Lending Club Loan dataset? Start by loading the dataset, handling imbalanced data using oversampling or undersampling techniques, and performing feature selection to identify important features, all implemented in `src/data_loader.py`. Train a Random Forest model and save the trained model in `models/saved_models/`. Save the feature importances to `results/feature_importances.txt` and save the ROC curve as `results/figures/roc_curve.png` using matplotlib. Finally, create a detailed Markdown report summarizing the data preprocessing steps, model training, and evaluation process, and save it as `results/loan_default_prediction_report.md`. The report should include insights on model performance and suggestions for potential improvements.",
4
+ "tags": [
5
+ "Classification",
6
+ "Supervised Learning"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"Lending Club Loan\" dataset is loaded in `src/data_loader.py`.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Imbalanced data is handled using oversampling or undersampling techniques, implemented in `src/data_loader.py`.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 0
29
+ ],
30
+ "criteria": "Feature selection is performed to identify important features in `src/data_loader.py`.",
31
+ "category": "Data preprocessing and postprocessing",
32
+ "satisfied": null
33
+ },
34
+ {
35
+ "requirement_id": 3,
36
+ "prerequisites": [
37
+ 2
38
+ ],
39
+ "criteria": "A \"Random Forest\" classifier is implemented for predicting loan default. Save the trained model in `models/saved_models/`.",
40
+ "category": "Machine Learning Method",
41
+ "satisfied": null
42
+ },
43
+ {
44
+ "requirement_id": 4,
45
+ "prerequisites": [
46
+ 2,
47
+ 3
48
+ ],
49
+ "criteria": "Feature importances are saved as `results/feature_importances.txt`.",
50
+ "category": "Other",
51
+ "satisfied": null
52
+ },
53
+ {
54
+ "requirement_id": 5,
55
+ "prerequisites": [
56
+ 2,
57
+ 3
58
+ ],
59
+ "criteria": "The \"ROC curve\" is visualized and saved using \"matplotlib\" at `results/figures/roc_curve.png`.",
60
+ "category": "Visualization",
61
+ "satisfied": null
62
+ },
63
+ {
64
+ "requirement_id": 6,
65
+ "prerequisites": [
66
+ 2,
67
+ 3,
68
+ 5
69
+ ],
70
+ "criteria": "A Markdown report containing the data preprocessing steps, model training, and evaluation process is created and saved as `results/loan_default_prediction_report.md`.",
71
+ "category": "Other",
72
+ "satisfied": null
73
+ }
74
+ ],
75
+ "preferences": [
76
+ {
77
+ "preference_id": 0,
78
+ "criteria": "The Markdown report is detailed.",
79
+ "satisfied": null
80
+ },
81
+ {
82
+ "preference_id": 1,
83
+ "criteria": "The Markdown report should include insights on model performance and suggestions for potential improvements.",
84
+ "satisfied": null
85
+ }
86
+ ],
87
+ "is_kaggle_api_needed": false,
88
+ "is_training_needed": true,
89
+ "is_web_navigation_needed": false
90
+ }
instances/36_Music_Emotion_Classification_SVM_GTZAN_ML.json ADDED
@@ -0,0 +1,101 @@
1
+ {
2
+ "name": "36_Music_Emotion_Classification_SVM_GTZAN_ML",
3
+ "query": "Help me develop a project for music emotion classification using an SVM model with the GTZAN dataset. The project should include audio preprocessing using librosa for noise removal and normalization, MFCC feature extraction with 13 coefficients, and the use of a linear SVM classifier with hyperparameter tuning. The dataset loading, audio preprocessing, including noise removal and normalization, and MFCC feature extraction must be implemented in `src/data_loader.py`. Implement the SVM classifier in `src/model.py`. Save the classification results to `results/predictions.txt`, visualize audio spectrograms with librosa (saving them to `results/figures/`), and create an interactive webpage in `src/app.py` with Streamlit that allows real-time audio file uploads and displays both classification results and spectrograms in `results/figures/`. Ensure efficient resource management with attention to automatically managing the launch and termination of the Streamlit webpage.",
4
+ "tags": [
5
+ "Audio Processing",
6
+ "Classification"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"GTZAN\" music emotion loaded in `src/data_loader.py`.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Audio preprocessing, including noise removal and normalization, is performed in `src/data_loader.py`.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [
28
+ 0,
29
+ 1
30
+ ],
31
+ "criteria": "MFCC feature extraction is implemented in `src/data_loader.py`.",
32
+ "category": "Data preprocessing and postprocessing",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [
38
+ 1,
39
+ 2
40
+ ],
41
+ "criteria": "A \"SVM classifier\" is implemented in `src/model.py`.",
42
+ "category": "Machine Learning Method",
43
+ "satisfied": null
44
+ },
45
+ {
46
+ "requirement_id": 4,
47
+ "prerequisites": [
48
+ 3
49
+ ],
50
+ "criteria": "The classification results are saved in `results/predictions.txt`.",
51
+ "category": "Other",
52
+ "satisfied": null
53
+ },
54
+ {
55
+ "requirement_id": 5,
56
+ "prerequisites": [
57
+ 0,
58
+ 1,
59
+ 2,
60
+ 3
61
+ ],
62
+ "criteria": "Audio spectrograms are visualized with \"librosa\" and saved to `results/figures/`.",
63
+ "category": "Visualization",
64
+ "satisfied": null
65
+ },
66
+ {
67
+ "requirement_id": 6,
68
+ "prerequisites": [
69
+ 0,
70
+ 1,
71
+ 2,
72
+ 3,
73
+ 4,
74
+ 5
75
+ ],
76
+ "criteria": "An interactive web page is created in `src/app.py` using \"Streamlit\" to display classification results and spectrograms in `results/figures/`.",
77
+ "category": "Human Computer Interaction",
78
+ "satisfied": null
79
+ }
80
+ ],
81
+ "preferences": [
82
+ {
83
+ "preference_id": 0,
84
+ "criteria": "The \"Streamlit\" webpage should allow users to upload new audio files and view the classification results in real-time.",
85
+ "satisfied": null
86
+ },
87
+ {
88
+ "preference_id": 1,
89
+ "criteria": "The spectrogram visualizations should include options to adjust the frequency range and time resolution for deeper analysis.",
90
+ "satisfied": null
91
+ },
92
+ {
93
+ "preference_id": 2,
94
+ "criteria": "The system should perform efficient resource management especially on managing the launch and termination of the Streamlit webpage.",
95
+ "satisfied": null
96
+ }
97
+ ],
98
+ "is_kaggle_api_needed": false,
99
+ "is_training_needed": true,
100
+ "is_web_navigation_needed": false
101
+ }
instances/37_Lane_Detection_ResNet50_TuSimple_DL.json ADDED
@@ -0,0 +1,99 @@
1
+ {
2
+ "name": "37_Lane_Detection_ResNet50_TuSimple_DL",
3
+ "query": "Develop a lane detection system. Start by importing the standard pre-trained ResNet-50 model from PyTorch in `src/model.py`. We'll work here with the TuSimple lane detection dataset as our test dataset, which should be loaded through `src/data_loader.py`. Then load and preprocess the dataset, including data augmentation techniques such as random cropping, rotation, and scaling in `src/data_loader.py`. Fine-tune the model and save the detection accuracy in `results/metrics/detection_accuracy.txt`, and save the trained model as `models/saved_models/lane_detection_model.pth`. Split a subset of the data for validation, implemented in `src/data_loader.py`. Visualize detection results using matplotlib and save them to `results/figures/`. Create a detailed report of the entire process, including data preprocessing, model training, and evaluation, and save it as `results/lane_detection_report.pdf`. The report should also analyze the model's performance under challenging conditions such as curves or poor lighting.",
4
+ "tags": [
5
+ "Computer Vision"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "The \"TuSimple\" lane detection dataset is loaded in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Data augmentation, including random cropping, rotation, and scaling, is performed in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [
27
+ 0
28
+ ],
29
+ "criteria": "A subset of the data is split for validation and implemented in `src/data_loader.py`.",
30
+ "category": "Data preprocessing and postprocessing",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [],
36
+ "criteria": "The pre-trained \"ResNet-50\" model is imported from PyTorch in `src/model.py`.",
37
+ "category": "Machine Learning Method",
38
+ "satisfied": null
39
+ },
40
+ {
41
+ "requirement_id": 4,
42
+ "prerequisites": [
43
+ 1,
44
+ 2,
45
+ 3
46
+ ],
47
+ "criteria": "Fine tune the \"ResNet-50\" model and save it as `models/saved_models/lane_detection_model.pth`.",
48
+ "category": "Save Trained Model",
49
+ "satisfied": null
50
+ },
51
+ {
52
+ "requirement_id": 5,
53
+ "prerequisites": [
54
+ 4
55
+ ],
56
+ "criteria": "Detection accuracy is saved as `results/metrics/detection_accuracy.txt`.",
57
+ "category": "Performance Metrics",
58
+ "satisfied": null
59
+ },
60
+ {
61
+ "requirement_id": 6,
62
+ "prerequisites": [
63
+ 4
64
+ ],
65
+ "criteria": "Detection results are visualized with \"matplotlib\" and saved to `results/figures/`.",
66
+ "category": "Visualization",
67
+ "satisfied": null
68
+ },
69
+ {
70
+ "requirement_id": 7,
71
+ "prerequisites": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5
78
+ ],
79
+ "criteria": "A detailed report containing data preprocessing, model training, and evaluation process is created and saved as `results/lane_detection_report.pdf`.",
80
+ "category": "Other",
81
+ "satisfied": null
82
+ }
83
+ ],
84
+ "preferences": [
85
+ {
86
+ "preference_id": 0,
87
+ "criteria": "The report should include an analysis of the model's performance on challenging scenarios, such as curves or poor lighting conditions.",
88
+ "satisfied": null
89
+ },
90
+ {
91
+ "preference_id": 1,
92
+ "criteria": "The data augmentation steps should be well-documented, with examples of augmented images included in the report.",
93
+ "satisfied": null
94
+ }
95
+ ],
96
+ "is_kaggle_api_needed": false,
97
+ "is_training_needed": true,
98
+ "is_web_navigation_needed": false
99
+ }
instances/38_Object_Tracking_Siamese_OTB50_DL.json ADDED
@@ -0,0 +1,102 @@
1
+ {
2
+ "name": "38_Object_Tracking_Siamese_OTB50_DL",
3
+ "query": "I need to create a system for object tracking using a Siamese network and the OTB50 dataset. The OTB50 dataset should be loaded in `src/data_loader.py`. The system should include data augmentation steps such as rotation and scaling, performed in `src/data_loader.py`. Implement the Siamese network in `src/model.py`. Hyperparameters, such as learning rate and batch size, should be tuned in `src/train.py`. The tracking results should be saved as `results/tracking_results.txt`. Visualize the tracking results with OpenCV and save tracking videos under `results/videos/`. Additionally, create a comprehensive Markdown report that includes details of data preprocessing, model training, and evaluation process and save it as `results/object_tracking_report.md`. Ensure that the system can process new video sequences with minimal adjustments for flexible application. The Markdown report should include a section analyzing the impact of different hyperparameters on the tracking performance.",
4
+ "tags": [
5
+ "Computer Vision"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "The \"OTB50\" dataset is loaded in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Data augmentation, such as rotation and scaling, is performed in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [],
27
+ "criteria": "A \"Siamese\"network is implemented in `src/model.py`.",
28
+ "category": "Machine Learning Method",
29
+ "satisfied": null
30
+ },
31
+ {
32
+ "requirement_id": 3,
33
+ "prerequisites": [
34
+ 0,
35
+ 1,
36
+ 2
37
+ ],
38
+ "criteria": "Hyperparameters, such as learning rate and batch size, are tuned in `src/train.py`.",
39
+ "category": "Machine Learning Method",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 0,
46
+ 1,
47
+ 2,
48
+ 3
49
+ ],
50
+ "criteria": "The tracking results are saved as `results/tracking_results.txt`.",
51
+ "category": "Other",
52
+ "satisfied": null
53
+ },
54
+ {
55
+ "requirement_id": 5,
56
+ "prerequisites": [
57
+ 0,
58
+ 1,
59
+ 2,
60
+ 3
61
+ ],
62
+ "criteria": "Tracking results are visualized with OpenCV and saved to `results/videos/`.",
63
+ "category": "Visualization",
64
+ "satisfied": null
65
+ },
66
+ {
67
+ "requirement_id": 6,
68
+ "prerequisites": [
69
+ 0,
70
+ 1,
71
+ 2,
72
+ 3
73
+ ],
74
+ "criteria": "A detailed Markdown document containing data preprocessing, model training, and evaluation processes is created and saved as `results/object_tracking_report.md`.",
75
+ "category": "Other",
76
+ "satisfied": null
77
+ },
78
+ {
79
+ "requirement_id": 7,
80
+ "prerequisites": [
81
+ 6
82
+ ],
83
+ "criteria": "The Markdown report should include a section analyzing the impact of different hyperparameters on tracking performance.",
84
+ "satisfied": null
85
+ }
86
+ ],
87
+ "preferences": [
88
+ {
89
+ "preference_id": 0,
90
+ "criteria": "The tracking videos should be saved in high resolution and include annotations that highlight the tracked object.",
91
+ "satisfied": null
92
+ },
93
+ {
94
+ "preference_id": 1,
95
+ "criteria": "Ensure the system is capable of processing new video sequences with minimal modification, allowing for flexible use cases.",
96
+ "satisfied": null
97
+ }
98
+ ],
99
+ "is_kaggle_api_needed": false,
100
+ "is_training_needed": true,
101
+ "is_web_navigation_needed": false
102
+ }
instances/39_Drug_Response_Prediction_SVM_GDSC_ML.json ADDED
@@ -0,0 +1,102 @@
1
+ {
2
+ "name": "39_Drug_Response_Prediction_SVM_GDSC_ML",
3
+ "query": "Develop a system to predict drug response using the GDSC dataset with a Support Vector Machine (SVM) regressor. Load the dataset and perform feature selection to identify key features in `src/data_loader.py`. Implement the SVM regressor in `src/model.py`. Use cross-validation to evaluate the model's performance in `src/train.py`. Save the performance results to `results/metrics/performance.txt`. Visualize the regression results using seaborn and save it under `results/figures/`. Next, create a report including the data preprocessing, model training, evaluation process, and the visualization. Save the report as `results/drug_response_prediction_report.pdf`. The report should emphasize how feature selection impacts the model's performance, and the regression results visualization should clearly highlight the relationship between the selected features and the predicted drug response. Ensure the system is designed to be easily extendable for incorporating additional datasets or new features.",
4
+ "tags": [
5
+ "Medical Analysis",
6
+ "Regression",
7
+ "Supervised Learning"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"GDSC\" drug response dataset is loaded in `src/data_loader.py`.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Feature selection is performed to identify important features in `src/data_loader.py`.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [],
29
+ "criteria": "The \"SVM regressor\" is implemented in `src/model.py`.",
30
+ "category": "Machine Learning Method",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [
36
+ 1,
37
+ 2
38
+ ],
39
+ "criteria": "Cross-validation is used to evaluate the model in `src/train.py`.",
40
+ "category": "Performance Metrics",
41
+ "satisfied": null
42
+ },
43
+ {
44
+ "requirement_id": 4,
45
+ "prerequisites": [
46
+ 0,
47
+ 1,
48
+ 2,
49
+ 3
50
+ ],
51
+ "criteria": "The performance results are saved as `results/metrics/performance.txt`.",
52
+ "category": "Performance Metrics",
53
+ "satisfied": null
54
+ },
55
+ {
56
+ "requirement_id": 5,
57
+ "prerequisites": [
58
+ 0,
59
+ 1,
60
+ 2,
61
+ 3
62
+ ],
63
+ "criteria": "The regression results are visualized using \"seaborn,\" and saved to `results/figures/`.",
64
+ "category": "Visualization",
65
+ "satisfied": null
66
+ },
67
+ {
68
+ "requirement_id": 6,
69
+ "prerequisites": [
70
+ 0,
71
+ 1,
72
+ 2,
73
+ 3,
74
+ 4,
75
+ 5
76
+ ],
77
+ "criteria": "A report containing data preprocessing, model training, evaluation process, and the regression results visualization, is created and saved as `results/drug_response_prediction_report.pdf`.",
78
+ "category": "Other",
79
+ "satisfied": null
80
+ }
81
+ ],
82
+ "preferences": [
83
+ {
84
+ "preference_id": 0,
85
+ "criteria": "The report should emphasize how feature selection impacts the model's performance.",
86
+ "satisfied": null
87
+ },
88
+ {
89
+ "preference_id": 1,
90
+ "criteria": "The regression results visualization should clearly highlight the relationship between the selected features and the predicted drug response.",
91
+ "satisfied": null
92
+ },
93
+ {
94
+ "preference_id": 2,
95
+ "criteria": "Ensure that the system can be easily extended to incorporate additional datasets or new features without significant rework.",
96
+ "satisfied": null
97
+ }
98
+ ],
99
+ "is_kaggle_api_needed": false,
100
+ "is_training_needed": true,
101
+ "is_web_navigation_needed": false
102
+ }
instances/40_Text_Summarization_BART_CNNDailyMail_DL.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "name": "40_Text_Summarization_BART_CNNDailyMail_DL",
3
+ "query": "Develop a system that performs text summarization system using the BART model with the CNN/Daily Mail dataset. Start by loading and preparing the dataset in `src/data_loader.py`, then perform data preprocessing such as removing HTML tags and punctuation in `src/data_loader.py`. Import a pre-trained BART model for text summarization in `src/model.py` to generate summaries. Save the generated summaries to `results/summaries.txt`. Visualize the length distribution of these summaries using seaborn and save the visualization to `results/figures/summary_length_distribution.png`. Additionally, implement an interactive Streamlit web page in `src/visualize.py`, which allows users to view input texts and their generated summaries. Finally, generate a report covering data preprocessing and generation results, and save it as `results/text_summarization_report.pdf`.",
4
+ "tags": [
5
+ "Generative Models",
6
+ "Natural Language Processing"
7
+ ],
8
+ "requirements": [
9
+ {
10
+ "requirement_id": 0,
11
+ "prerequisites": [],
12
+ "criteria": "The \"CNN/Daily Mail\" news dataset is used, including loading and preparing the dataset in `src/data_loader.py`.",
13
+ "category": "Dataset or Environment",
14
+ "satisfied": null
15
+ },
16
+ {
17
+ "requirement_id": 1,
18
+ "prerequisites": [
19
+ 0
20
+ ],
21
+ "criteria": "Data preprocessing is performed in `src/data_loader.py`, including removing HTML tags and punctuation.",
22
+ "category": "Data preprocessing and postprocessing",
23
+ "satisfied": null
24
+ },
25
+ {
26
+ "requirement_id": 2,
27
+ "prerequisites": [],
28
+ "criteria": "A pre-trained \"BART\" model is imported for text summarization in `src/model.py`.",
29
+ "category": "Machine Learning Method",
30
+ "satisfied": null
31
+ },
32
+ {
33
+ "requirement_id": 3,
34
+ "prerequisites": [
35
+ 1,
36
+ 2
37
+ ],
38
+ "criteria": "The generated summary results are saved in `results/summary_results.txt`.",
39
+ "category": "Other",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 3
46
+ ],
47
+ "criteria": "The length distribution of the generated summaries is visualized using \"seaborn,\" and the plot is saved as `results/figures/summary_length_distribution.png`.",
48
+ "category": "Visualization",
49
+ "satisfied": null
50
+ },
51
+ {
52
+ "requirement_id": 5,
53
+ "prerequisites": [
54
+ 3
55
+ ],
56
+ "criteria": "An interactive web page is created using \"Streamlit\" to display input texts and their generated summaries and implemented in `src/visualize.py`.",
57
+ "category": "Human Computer Interaction",
58
+ "satisfied": null
59
+ },
60
+ {
61
+ "requirement_id": 6,
62
+ "prerequisites": [
63
+ 3
64
+ ],
65
+ "criteria": "A report covering data preprocessing, model training, and generation results is generated and saved as `results/text_summarization_report.pdf`.",
66
+ "category": "Other",
67
+ "satisfied": null
68
+ }
69
+ ],
70
+ "preferences": [
71
+ {
72
+ "preference_id": 0,
73
+ "criteria": "The interactive \"Streamlit\" webpage should allow users to input new text and generate summaries in real-time.",
74
+ "satisfied": null
75
+ },
76
+ {
77
+ "preference_id": 1,
78
+ "criteria": "The report should include a discussion on how different hyperparameter settings affected the model's performance.",
79
+ "satisfied": null
80
+ },
81
+ {
82
+ "preference_id": 2,
83
+ "criteria": "During development, the \"Streamlit\" application should be efficiently managed to avoid unnecessary resource usage.",
84
+ "satisfied": null
85
+ }
86
+ ],
87
+ "is_kaggle_api_needed": false,
88
+ "is_training_needed": false,
89
+ "is_web_navigation_needed": false
90
+ }
instances/41_Stock_Classification_KNN_YahooFinance_ML.json ADDED
@@ -0,0 +1,92 @@
1
+ {
2
+ "name": "41_Stock_Classification_KNN_YahooFinance_ML",
3
+ "query": "Develop a stock classification system using a KNN model on the Yahoo Finance dataset. Your implementation should decide if a given stock will increase or decrease in price. Start by loading the dataset and performing feature engineering, including generating technical indicators and selecting the most relevant features in `src/data_loader.py`. Standardize the data to ensure feature values are within the same range in `src/data_loader.py`. Apply the KNN classifier to classify stocks based on the engineered features, and save the implementation in `src/model.py`. Next, save the classification results to `results/classification_results.txt`, and visualize the correlation between the technical indicators and the classification result as a heatmap using seaborn. Save the headmap as `results/figures/feature_correlation_heatmap.png`. Finally, create an interactive Jupyter Notebook under `results/` that explains the process, showcases the classification results, and will help ease future updates that introduce new data.",
4
+ "tags": [
5
+ "Classification",
6
+ "Financial Analysis",
7
+ "Supervised Learning"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"Yahoo Finance\" dataset is used, including data loading and preparation in `src/data_loader.py`.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Feature engineering is performed, including generating technical indicators and conducting feature selection in `src/data_loader.py`.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [
29
+ 0
30
+ ],
31
+ "criteria": "Data is standardized to ensure feature values are within the same range in `src/data_loader.py`.",
32
+ "category": "Data preprocessing and postprocessing",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [
38
+ 2
39
+ ],
40
+ "criteria": "The \"KNN classifier\" is applied to classify stocks based on the engineered features. Please save the implementation in `src/model.py`.",
41
+ "category": "Machine Learning Method",
42
+ "satisfied": null
43
+ },
44
+ {
45
+ "requirement_id": 4,
46
+ "prerequisites": [
47
+ 3
48
+ ],
49
+ "criteria": "The classification results are saved in `results/classification_results.txt`.",
50
+ "category": "Other",
51
+ "satisfied": null
52
+ },
53
+ {
54
+ "requirement_id": 5,
55
+ "prerequisites": [
56
+ 4
57
+ ],
58
+ "criteria": "A heatmap representing the correlations between the technical indicators and the classification results is saved as `results/figures/feature_correlation_heatmap.png`.",
59
+ "category": "Visualization",
60
+ "satisfied": null
61
+ },
62
+ {
63
+ "requirement_id": 6,
64
+ "prerequisites": [
65
+ 4
66
+ ],
67
+ "criteria": "An interactive \"Jupyter Notebook\" is created under `results/` to explain the process and showcase the classification results.",
68
+ "category": "Human Computer Interaction",
69
+ "satisfied": null
70
+ }
71
+ ],
72
+ "preferences": [
73
+ {
74
+ "preference_id": 0,
75
+ "criteria": "The Jupyter Notebook should include clear explanations of each step, including feature engineering and model evaluation.",
76
+ "satisfied": null
77
+ },
78
+ {
79
+ "preference_id": 1,
80
+ "criteria": "The correlation heatmap should highlight the most significant technical indicators and provide insights into their relationships.",
81
+ "satisfied": null
82
+ },
83
+ {
84
+ "preference_id": 2,
85
+ "criteria": "The system should allow easy updates with new data, making the notebook flexible for future analysis.",
86
+ "satisfied": null
87
+ }
88
+ ],
89
+ "is_kaggle_api_needed": false,
90
+ "is_training_needed": true,
91
+ "is_web_navigation_needed": false
92
+ }
instances/42_Medical_Image_Classification_DenseNet121_ChestXray_DL.json ADDED
@@ -0,0 +1,87 @@
1
+ {
2
+ "name": "42_Medical_Image_Classification_DenseNet121_ChestXray_DL",
3
+ "query": "Create a medical image classification system using a pre-trained DenseNet-121 model and the Kaggle Chest X-ray dataset. Start by loading and preprocessing the dataset and performing data augmentation (including rotation, translation, and scaling) in `src/data_loader.py`. Apply the DenseNet-121 model for classification, recording the accuracy and saving it to `results/metrics/classification_accuracy.txt`. Fine-tune the model and save it as `models/saved_models/chest_xray_densenet_model.pth`. Use Grad-CAM to visualize the model's decision-making process and save these visualizations as `results/figures/grad_cam_visualizations.gif`. Finally, create a Markdown report that documents the model architecture, training process, data augmentation techniques, and analysis of the results, and save it as `results/medical_image_classification_report.md`. It would also be nice if the system was flexible such that the DenseNet-121 could be easily further fine-tuned by a human user.",
4
+ "tags": [
5
+ "Classification",
6
+ "Computer Vision",
7
+ "Medical Analysis",
8
+ "Supervised Learning"
9
+ ],
10
+ "requirements": [
11
+ {
12
+ "requirement_id": 0,
13
+ "prerequisites": [],
14
+ "criteria": "The \"Kaggle Chest X-ray\" dataset is used, with data loading and preprocessing implemented in `src/data_loader.py`.",
15
+ "category": "Dataset or Environment",
16
+ "satisfied": null
17
+ },
18
+ {
19
+ "requirement_id": 1,
20
+ "prerequisites": [
21
+ 0
22
+ ],
23
+ "criteria": "Data augmentation is performed, including rotation, translation, and scaling of images in `src/data_loader.py`.",
24
+ "category": "Data preprocessing and postprocessing",
25
+ "satisfied": null
26
+ },
27
+ {
28
+ "requirement_id": 2,
29
+ "prerequisites": [
30
+ 1
31
+ ],
32
+ "criteria": "The pre-trained \"DenseNet-121\" model is fine-tuned saved in `models/saved_models/`.",
33
+ "category": "Machine Learning Method",
34
+ "satisfied": null
35
+ },
36
+ {
37
+ "requirement_id": 3,
38
+ "prerequisites": [
39
+ 1,
40
+ 2
41
+ ],
42
+ "criteria": "Classification accuracy is printed and saved as `results/metrics/classification_accuracy.txt`.",
43
+ "category": "Performance Metrics",
44
+ "satisfied": null
45
+ },
46
+ {
47
+ "requirement_id": 4,
48
+ "prerequisites": [
49
+ 2,
50
+ 3
51
+ ],
52
+ "criteria": "\"Grad-CAM\" is used to visualize model decisions, saving the visualizations as `results/figures/grad_cam_visualizations.gif`.",
53
+ "category": "Visualization",
54
+ "satisfied": null
55
+ },
56
+ {
57
+ "requirement_id": 5,
58
+ "prerequisites": [
59
+ 2,
60
+ 3
61
+ ],
62
+ "criteria": "A \"Markdown\" report is created containing the model architecture, training process, data augmentation, and result analysis, and saved as `results/medical_image_classification_report.md`.",
63
+ "category": "Other",
64
+ "satisfied": null
65
+ }
66
+ ],
67
+ "preferences": [
68
+ {
69
+ "preference_id": 0,
70
+ "criteria": "The \"Markdown\" report should include a section explaining the impact of data augmentation on model performance.",
71
+ "satisfied": null
72
+ },
73
+ {
74
+ "preference_id": 1,
75
+ "criteria": "The \"Grad-CAM\" visualizations should clearly highlight the areas of the images that contributed most to the model's decisions.",
76
+ "satisfied": null
77
+ },
78
+ {
79
+ "preference_id": 2,
80
+ "criteria": "The system should be flexible to allow further fine-tuning of the \"DenseNet-121\" model.",
81
+ "satisfied": null
82
+ }
83
+ ],
84
+ "is_kaggle_api_needed": true,
85
+ "is_training_needed": true,
86
+ "is_web_navigation_needed": false
87
+ }
instances/43_Social_Network_Analysis_GCN_Cora_ML.json ADDED
@@ -0,0 +1,91 @@
1
+ {
2
+ "name": "43_Social_Network_Analysis_GCN_Cora_ML",
3
+ "query": "Hey! Could you help me create a social network analysis system using a GCN model with the Cora citation network dataset? First, let's load and preprocess the dataset, including normalization and denoising, in `src/data_loader.py`. Then, apply the GCN model to classify the nodes and tune the hyperparameters such as the learning rate and hidden layer size to get the best results in `src/train.py`. Save the model under `models/saved_models/`. Once you've done that, please save the node classification performance to `results/metrics/node_classification_results.txt`. Visualize the citation network structure and save it as `results/figures/citation_network_visualization.png`. Lastly, create an interactive network graph using either D3.js or Bokeh to showcase the node classification results and network visualization in `results/figures/`. It would also be great if your implementation could allow a programmer to easily swap to other citation datasets. Thanks a lot for your help!",
4
+ "tags": [
5
+ "Unsupervised Learning"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "The \"Cora citation network\" dataset is loaded in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Data preprocessing is performed, including normalization and denoising, in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [
27
+ 1
28
+ ],
29
+ "criteria": "Hyperparameters such as learning rate and hidden layer size are tuned to optimize the model in `src/train.py`.",
30
+ "category": "Machine Learning Method",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [
36
+ 2
37
+ ],
38
+ "criteria": "The model is saved under `models/saved_models/`.",
39
+ "category": "Save Trained Model",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 2
46
+ ],
47
+ "criteria": "Node classification performance are saved in `results/metrics/node_classification_results.txt`.",
48
+ "category": "Performence Metrics",
49
+ "satisfied": null
50
+ },
51
+ {
52
+ "requirement_id": 5,
53
+ "prerequisites": [
54
+ 2
55
+ ],
56
+ "criteria": "The citation network structure is visualized and saved as `results/figures/citation_network_visualization.png`.",
57
+ "category": "Visualization",
58
+ "satisfied": null
59
+ },
60
+ {
61
+ "requirement_id": 6,
62
+ "prerequisites": [
63
+ 4,
64
+ 5
65
+ ],
66
+ "criteria": "An interactive network graph is created using \"D3.js\" or \"Bokeh\" to showcase the node classification results and network visualization in `results/figures/`.",
67
+ "category": "Visualization",
68
+ "satisfied": null
69
+ }
70
+ ],
71
+ "preferences": [
72
+ {
73
+ "preference_id": 0,
74
+ "criteria": "The interactive network graph should allow users to explore individual nodes and their classifications dynamically.",
75
+ "satisfied": null
76
+ },
77
+ {
78
+ "preference_id": 1,
79
+ "criteria": "The citation network visualization should clearly differentiate between different node classes and relationships.",
80
+ "satisfied": null
81
+ },
82
+ {
83
+ "preference_id": 2,
84
+ "criteria": "The system should be designed to handle additional citation datasets with minimal modification.",
85
+ "satisfied": null
86
+ }
87
+ ],
88
+ "is_kaggle_api_needed": false,
89
+ "is_training_needed": true,
90
+ "is_web_navigation_needed": false
91
+ }
instances/44_Text_Classification_BERT_AGNews_DL.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "name": "44_Text_Classification_BERT_AGNews_DL",
3
+ "query": "Hey! Could you help me build a text classification system using a pretrained BERT model on the AG News dataset? Start by loading and preprocessing the data in `src/data_loader.py` (including removing whatever noise you can and performing tokenization). Once that's done, please save the BERT model parameters under `models/saved_models/`. Apply the BERT model and save the classification results to `results/results.txt`. Also, visualize the confusion matrix using seaborn and save it as `results/figures/confusion_matrix.png`. Finally, create an interactive Jupyter Notebook to display the input texts alongside their classification results under `results/`. It would be great if the notebook explained how transfer learning was used and its impact on the model's performance. Likewise, it would be good if your implementation is straightforward to swap out the dataset, including the code to retrain BERT. Thanks a bunch!",
4
+ "tags": [
5
+ "Classification",
6
+ "Natural Language Processing",
7
+ "Supervised Learning"
8
+ ],
9
+ "requirements": [
10
+ {
11
+ "requirement_id": 0,
12
+ "prerequisites": [],
13
+ "criteria": "The \"AG News\" dataset is loaded in `src/data_loader.py`.",
14
+ "category": "Dataset or Environment",
15
+ "satisfied": null
16
+ },
17
+ {
18
+ "requirement_id": 1,
19
+ "prerequisites": [
20
+ 0
21
+ ],
22
+ "criteria": "Data preprocessing is performed in `src/data_loader.py`, including noise removal and tokenization.",
23
+ "category": "Data preprocessing and postprocessing",
24
+ "satisfied": null
25
+ },
26
+ {
27
+ "requirement_id": 2,
28
+ "prerequisites": [
29
+ 1
30
+ ],
31
+ "criteria": "The \"BERT\" model is applied for text classification and the parameters are saved under `models/saved_models/`.",
32
+ "category": "Machine Learning Method",
33
+ "satisfied": null
34
+ },
35
+ {
36
+ "requirement_id": 3,
37
+ "prerequisites": [
38
+ 2
39
+ ],
40
+ "criteria": "The classification results are saved as `results/results.txt`.",
41
+ "category": "Other",
42
+ "satisfied": null
43
+ },
44
+ {
45
+ "requirement_id": 4,
46
+ "prerequisites": [
47
+ 2
48
+ ],
49
+ "criteria": "The confusion matrix of classification result is visualized using \"seaborn,\" and saved as `results/figures/confusion_matrix.png`.",
50
+ "category": "Visualization",
51
+ "satisfied": null
52
+ },
53
+ {
54
+ "requirement_id": 5,
55
+ "prerequisites": [
56
+ 3
57
+ ],
58
+ "criteria": "An interactive \"Jupyter Notebook\" is created to display input texts and their classification results under `results`.",
59
+ "category": "Human Computer Interaction",
60
+ "satisfied": null
61
+ }
62
+ ],
63
+ "preferences": [
64
+ {
65
+ "preference_id": 0,
66
+ "criteria": "The Jupyter Notebook should explain how transfer learning was applied and its impact on model performance.",
67
+ "satisfied": null
68
+ },
69
+ {
70
+ "preference_id": 1,
71
+ "criteria": "The confusion matrix visualization should clearly differentiate between correctly and incorrectly classified samples.",
72
+ "satisfied": null
73
+ },
74
+ {
75
+ "preference_id": 2,
76
+ "criteria": "The system should allow for easy retraining of the \"BERT\" model with new data.",
77
+ "satisfied": null
78
+ }
79
+ ],
80
+ "is_kaggle_api_needed": false,
81
+ "is_training_needed": true,
82
+ "is_web_navigation_needed": false
83
+ }
instances/45_Product_Recommendation_MatrixFactorization_AmazonReviews_ML.json ADDED
@@ -0,0 +1,87 @@
1
+ {
2
+ "name": "45_Product_Recommendation_MatrixFactorization_AmazonReviews_ML",
3
+ "query": "Could you help me set up a product recommendation system using a matrix factorization algorithm with the Electronics subset of the Amazon Reviews 2023 dataset? You should handle data loading and all the data preprocessing, including noise removal and normalization in `src/data_loader.py`. Apply a latent factor model to compute user-item interactions and save the implementation in `src/model.py`. Print and save the recommendation results to `results/recommendation_results.txt`. Then visualize these recommendations using the Plotly library and save the visualization as `results/figures/recommendation_visualization.html`. Finally, generate an analysis report covering data preprocessing, model training, and results, should be saved as `results/recommendation_report.md`.",
4
+ "tags": [
5
+ "Recommender Systems"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "The \"Electronics\" subset of the \"Amazon Reviews 2023\" dataset is loaded in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Data preprocessing is performed, including noise removal and normalization in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [],
27
+ "criteria": "A \"Latent Factor model\" to computer user-item interactions is implemented in `src/model.py`.",
28
+ "category": "Machine Learning Method",
29
+ "satisfied": null
30
+ },
31
+ {
32
+ "requirement_id": 3,
33
+ "prerequisites": [
34
+ 0,
35
+ 2,
36
+ 3
37
+ ],
38
+ "criteria": "Recommendation results are saved as `results/recommendation_results.txt`.",
39
+ "category": "Other",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 0,
46
+ 2,
47
+ 3,
48
+ 4
49
+ ],
50
+ "criteria": "Recommendation results are visualized using \"Plotly\" and saved as `results/figures/recommendation_visualization.html`.",
51
+ "category": "Visualization",
52
+ "satisfied": null
53
+ },
54
+ {
55
+ "requirement_id": 5,
56
+ "prerequisites": [
57
+ 0,
58
+ 2,
59
+ 3,
60
+ 4
61
+ ],
62
+ "criteria": "An analysis report containing data preprocessing, model training, and recommendation results is generated and saved as `results/recommendation_report.md`.",
63
+ "category": "Other",
64
+ "satisfied": null
65
+ }
66
+ ],
67
+ "preferences": [
68
+ {
69
+ "preference_id": 0,
70
+ "criteria": "The impact of different preprocessing steps on recommendation accuracy should be discussed in the analysis report.",
71
+ "satisfied": null
72
+ },
73
+ {
74
+ "preference_id": 1,
75
+ "criteria": "The visualization should be interactive, allowing users to explore the recommendation results for different products.",
76
+ "satisfied": null
77
+ },
78
+ {
79
+ "preference_id": 2,
80
+ "criteria": "The system should be designed to easily incorporate additional user behavior data or product features for future improvements.",
81
+ "satisfied": null
82
+ }
83
+ ],
84
+ "is_kaggle_api_needed": false,
85
+ "is_training_needed": true,
86
+ "is_web_navigation_needed": false
87
+ }
instances/46_Speech_Recognition_DeepSpeech_LibriSpeech_DL.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "name": "46_Speech_Recognition_DeepSpeech_LibriSpeech_DL",
3
+ "query": "I'd like to develop a speech recognition system using the DeepSpeech library and the LibriSpeech dataset for me. Could you implement data loading and audio preprocessing, including noise reduction and normalization, in `src/data_loader.py`? Tune the hyperparameters such as learning rate and batch size in `src/train.py`. Please save the recognition results in `results/recognition_results.txt`. Next, create visualizations of the audio processing stages (like waveform and spectrogram) and save them as `results/figures/audio_visualization.png`. Generate a detailed report on recognition accuracy, error analysis, and suggestions for future improvements, and save it as `results/recognition_report.md`. Additionally, document the setup process for DeepSpeech, with tips for common installation issues, with [DeepSpeech documentation](https://deepspeech.readthedocs.io/en/r0.9/) as a reference. Save the final model in `models/saved_models/`. Thanks in advance!",
4
+ "tags": [
5
+ "Audio Processing"
6
+ ],
7
+ "requirements": [
8
+ {
9
+ "requirement_id": 0,
10
+ "prerequisites": [],
11
+ "criteria": "\"LibriSpeech\" dataset is loaded in `src/data_loader.py`.",
12
+ "category": "Dataset or Environment",
13
+ "satisfied": null
14
+ },
15
+ {
16
+ "requirement_id": 1,
17
+ "prerequisites": [
18
+ 0
19
+ ],
20
+ "criteria": "Audio preprocessing, including noise reduction and normalization, is performed in `src/data_loader.py`.",
21
+ "category": "Data preprocessing and postprocessing",
22
+ "satisfied": null
23
+ },
24
+ {
25
+ "requirement_id": 2,
26
+ "prerequisites": [
27
+ 1
28
+ ],
29
+ "criteria": "Hyperparameters such as learning rate and batch size are tuned in `src/train.py`.",
30
+ "category": "Machine Learning Method",
31
+ "satisfied": null
32
+ },
33
+ {
34
+ "requirement_id": 3,
35
+ "prerequisites": [
36
+ 2
37
+ ],
38
+ "criteria": "Save the speech recognition model in `models/saved_models/`.",
39
+ "category": "Save Trained Model",
40
+ "satisfied": null
41
+ },
42
+ {
43
+ "requirement_id": 4,
44
+ "prerequisites": [
45
+ 2
46
+ ],
47
+ "criteria": "Recognition results are saved as `results/recognition_results.txt`.",
48
+ "category": "Other",
49
+ "satisfied": null
50
+ },
51
+ {
52
+ "requirement_id": 5,
53
+ "prerequisites": [
54
+ 1
55
+ ],
56
+ "criteria": "Visualizations of audio processing, like waveform and spectrogram, are generated and saved as `results/figures/audio_visualization.png`.",
57
+ "category": "Visualization",
58
+ "satisfied": null
59
+ },
60
+ {
61
+ "requirement_id": 6,
62
+ "prerequisites": [
63
+ 2
64
+ ],
65
+ "criteria": "A report containing recognition accuracy, error analysis, and future improvement suggestions is generated and saved as `results/recognition_report.md`.",
66
+ "category": "Performance Metrics",
67
+ "satisfied": null
68
+ }
69
+ ],
70
+ "preferences": [
71
+ {
72
+ "preference_id": 0,
73
+ "criteria": "The installation process for the \"DeepSpeech\" library should be well-documented, with troubleshooting tips if the library fails to install. Refer to the [DeepSpeech documentation](https://deepspeech.readthedocs.io/en/r0.9/) for guidance.",
74
+ "satisfied": null
75
+ },
76
+ {
77
+ "preference_id": 1,
78
+ "criteria": "The visualizations should clearly depict the stages of audio processing, making it easy to interpret the effects of preprocessing.",
79
+ "satisfied": null
80
+ },
81
+ {
82
+ "preference_id": 2,
83
+ "criteria": "The report should include recommendations for alternative models or approaches if the \"DeepSpeech\" library proves challenging to implement.",
84
+ "satisfied": null
85
+ }
86
+ ],
87
+ "is_kaggle_api_needed": false,
88
+ "is_training_needed": true,
89
+ "is_web_navigation_needed": true
90
+ }
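For the audio-preprocessing and visualization requirements in this instance, one plausible (but not mandated) route is librosa plus matplotlib. In the sketch below the file path is a placeholder for a LibriSpeech recording, the pre-emphasis filter is only a crude stand-in for real noise reduction, and `librosa.display.waveshow` assumes librosa >= 0.9.

```python
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display

# Placeholder path: any LibriSpeech FLAC file exposed by the data loader.
audio_path = "data/LibriSpeech/sample.flac"

y, sr = librosa.load(audio_path, sr=16000)      # resample to 16 kHz
y = librosa.util.normalize(y)                   # peak-normalize the amplitude
y = np.append(y[0], y[1:] - 0.97 * y[:-1])      # pre-emphasis: crude stand-in for noise reduction

# Log-magnitude spectrogram of the preprocessed signal.
spec_db = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)

fig, axes = plt.subplots(2, 1, figsize=(10, 6))
librosa.display.waveshow(y, sr=sr, ax=axes[0])
axes[0].set_title("Normalized waveform")
img = librosa.display.specshow(spec_db, sr=sr, x_axis="time", y_axis="hz", ax=axes[1])
axes[1].set_title("Log-magnitude spectrogram")
fig.colorbar(img, ax=axes[1], format="%+2.0f dB")
fig.tight_layout()
# Assumes results/figures/ already exists, as the instance layout implies.
fig.savefig("results/figures/audio_visualization.png")
```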
instances/47_Network_Traffic_Analysis_KMeans_NetworkTraffic_ML.json ADDED
@@ -0,0 +1,90 @@
+ {
+ "name": "47_Network_Traffic_Analysis_KMeans_NetworkTraffic_ML",
+ "query": "Develop a network traffic analysis system using the K-means clustering algorithm with the Network Intrusion dataset (CIC-IDS-2017) from Kaggle. Load the dataset and standardize the data to ensure feature values are within the same range in `src/data_loader.py`. Implement the K-means clustering algorithm in `src/model.py`. Evaluate the clusters using the silhouette coefficient and save the evaluation results under `results/metrics/`. Save the clustering results as `results/clustering_results.txt`. Visualize the clustering outcomes and save the visuals as `results/figures/network_traffic_visualization.png`. Create an interactive dashboard with Dash or Bokeh to explore the clustering results, and save the dashboard under `results/`. Ensure the system is modular so that a user could quickly change the clustering algorithm.",
+ "tags": [
+ "Unsupervised Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "\"Network Intrusion dataset (CIC-IDS-2017)\" from Kaggle is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Data is standardized to ensure feature values are within the same range in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "\"K-means\" clustering algorithm is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 1,
+ 2
+ ],
+ "criteria": "The \"silhouette coefficient\" is used for evaluation. The evaluation results are saved under `results/metrics/`.",
+ "category": "Performance Metrics",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 1,
+ 2
+ ],
+ "criteria": "Clustering results are printed and saved as `results/clustering_results.txt`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 4
+ ],
+ "criteria": "Network traffic clustering is visualized and saved as `results/figures/network_traffic_visualization.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 6,
+ "prerequisites": [
+ 4
+ ],
+ "criteria": "An interactive dashboard using \"Dash\" or \"Bokeh\" is created to showcase clustering results and saved under `results/`.",
+ "category": "Visualization",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The dashboard should allow users to filter and drill down into specific clusters for detailed analysis.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "Visualizations should clearly distinguish between different clusters, making it easy to identify patterns in the network traffic data.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 2,
+ "criteria": "The project should be modular, allowing a user to easily swap the clustering algorithm.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": true,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
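The standardization, K-means, and silhouette requirements above map naturally onto scikit-learn. The sketch below assumes a local CSV export of CIC-IDS-2017 and simply keeps the clean numeric columns; both choices are illustrative rather than part of the spec.

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Placeholder path and feature selection; the real columns come from src/data_loader.py.
df = pd.read_csv("data/cicids2017.csv")
X = (df.select_dtypes(include=[np.number])
       .replace([np.inf, -np.inf], np.nan)
       .dropna(axis=1))

X_scaled = StandardScaler().fit_transform(X)   # put all features on a common scale

# n_clusters=8 is an arbitrary illustrative choice, not a tuned value.
kmeans = KMeans(n_clusters=8, n_init=10, random_state=0)
labels = kmeans.fit_predict(X_scaled)

# Silhouette on a subsample keeps the evaluation tractable on large traffic data.
score = silhouette_score(X_scaled, labels,
                         sample_size=min(10_000, len(labels)), random_state=0)
print(f"silhouette coefficient: {score:.3f}")

# Assumes the results/ directory already exists, as the instance layout implies.
np.savetxt("results/clustering_results.txt", labels, fmt="%d")
```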
instances/48_Stock_Trading_Simulation_PPO_HistoricalData_RL.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "name": "48_Stock_Trading_Simulation_PPO_HistoricalData_RL",
+ "query": "Hey! I'm interested in developing a stock trading agent using the Proximal Policy Optimization (PPO) algorithm. The idea is to use historical market data for training and testing. A stock trading simulation environment should be implemented in `src/env.py`. The Proximal Policy Optimization (PPO) algorithm should be implemented in `src/train.py`. Please save the trained agent under `models/saved_models/`. Record all the trade decisions in `results/trade_decisions.txt` and save the total profit in `results/metrics/total_profit.txt`. Visualize the profit curve and save it as `results/figures/profit_curve.png`. Generate a report that covers the trading strategy, profit, and risk analysis, and save it as `results/trading_strategy_report.md`. Implement an interactive tool using Streamlit in `src/visualize.py` that allows users to try different parameters and run simulations.",
+ "tags": [
+ "Financial Analysis",
+ "Reinforcement Learning"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "A stock trading simulation environment is implemented in `src/env.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Historical market data is used for training and testing.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "The \"Proximal Policy Optimization (PPO)\" algorithm is implemented in `src/train.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 1,
+ 2
+ ],
+ "criteria": "Trade decisions are recorded and saved as `results/trade_decisions.txt`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 3
+ ],
+ "criteria": "Total profit is saved as `results/metrics/total_profit.txt`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 4
+ ],
+ "criteria": "The profit curve is visualized and saved as `results/figures/profit_curve.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 6,
+ "prerequisites": [
+ 4
+ ],
+ "criteria": "A report containing trading strategy, profit, and risk analysis is generated and saved as `results/trading_strategy_report.md`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 7,
+ "prerequisites": [
+ 1,
+ 2
+ ],
+ "criteria": "An interactive tool allowing users to try different parameters and run simulations using \"Streamlit\" is implemented in `src/visualize.py`.",
+ "category": "Human Computer Interaction",
+ "satisfied": null
+ },
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The profit curve visualization should highlight significant trades or events that impacted performance.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The report should include insights on how parameter tuning affects the trading outcome.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
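A minimal sketch of the trading environment that requirement 0 places in `src/env.py`, written against the Gymnasium API. The discrete hold/buy/sell action set, single-asset portfolio, and mark-to-market reward are illustrative simplifications, and the price series would come from the historical market data loader.

```python
import numpy as np
import gymnasium as gym
from gymnasium import spaces

class TradingEnv(gym.Env):
    """Toy single-asset trading environment.

    Observation: the last `window` prices. Action: 0 = hold, 1 = buy one share,
    2 = sell one share. Reward: change in mark-to-market portfolio value.
    All of this is a simplification for illustration only.
    """

    def __init__(self, prices, window=10, cash=1_000.0):
        super().__init__()
        self.prices = np.asarray(prices, dtype=np.float32)
        self.window = window
        self.start_cash = cash
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(window,), dtype=np.float32)

    def reset(self, seed=None, options=None):
        super().reset(seed=seed)
        self.t = self.window
        self.cash = self.start_cash
        self.shares = 0
        return self.prices[self.t - self.window:self.t], {}

    def step(self, action):
        price = float(self.prices[self.t])
        if action == 1 and self.cash >= price:      # buy one share
            self.cash -= price
            self.shares += 1
        elif action == 2 and self.shares > 0:       # sell one share
            self.cash += price
            self.shares -= 1
        value_before = self.cash + self.shares * price
        self.t += 1
        value_after = self.cash + self.shares * float(self.prices[self.t])
        terminated = self.t >= len(self.prices) - 1
        obs = self.prices[self.t - self.window:self.t]
        return obs, value_after - value_before, terminated, False, {}
```

With an environment like this, the PPO requirement in `src/train.py` could plausibly be met by an existing implementation such as Stable-Baselines3 (`PPO("MlpPolicy", env).learn(total_timesteps=...)`), though the instance does not prescribe any particular library.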
instances/49_Explainable_AI_LIME_Titanic_ML.json ADDED
@@ -0,0 +1,91 @@
+ {
+ "name": "49_Explainable_AI_LIME_Titanic_ML",
+ "query": "Hi there! I'm looking to create a project that explains model predictions using LIME, specifically with the Titanic survival prediction dataset. First, load the dataset in `src/data_loader.py`. Then, train a Random Forest classifier and save it under `models/saved_models/`. Finally, use LIME to explain the Random Forest classifier predictions and implement it in `src/visualize.py`. Generate a report including the explanations and save it as `results/model_explanation.md`. An interactive version of the report should be built with either Dash or Bokeh, implemented in `src/report.py`, so users can explore how different features affect the model's predictions. The explanation should be clear and easy to understand for non-tech folks. Additionally, save a well-labeled, intuitive feature importance plot in `results/figures/feature_importance.png`. Thanks!",
+ "tags": [
+ "Classification"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "The \"Titanic\" survival prediction dataset is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "A \"Random Forest classifier\" is trained for survival prediction.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [
+ 0,
+ 1
+ ],
+ "criteria": "\"LIME\" is used for model prediction explanation and implemented in `src/visualize.py`.",
+ "category": "Human Computer Interaction",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "A model prediction explanation report is generated and saved as `results/model_explanation.md`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 2
+ ],
+ "criteria": "A feature importance plot is saved as `results/figures/feature_importance.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 4
+ ],
+ "criteria": "An interactive report showcasing the impact of different features on predictions is created using \"Dash\" or \"Bokeh\" and implemented in `src/report.py`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 6,
+ "prerequisites": [
+ 1
+ ],
+ "criteria": "The trained model is saved under `models/saved_models/`.",
+ "category": "Save Trained Model",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The explanation report should be written in a clear and accessible style, making it understandable even for those without a deep technical background.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The feature importance plot should be visually intuitive, with clear labels and descriptions.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
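To make the LIME requirement concrete, here is a small sketch using `lime.lime_tabular.LimeTabularExplainer` on a Random Forest classifier. The feature matrix below is synthetic and its column names are invented stand-ins for the preprocessed Titanic features, so the whole setup is illustrative only.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from lime.lime_tabular import LimeTabularExplainer

# Synthetic stand-in for the preprocessed Titanic feature matrix.
rng = np.random.default_rng(0)
feature_names = ["pclass", "age", "fare", "sex_male"]
X = np.column_stack([
    rng.integers(1, 4, 500),        # passenger class
    rng.uniform(1, 80, 500),        # age
    rng.uniform(5, 300, 500),       # fare
    rng.integers(0, 2, 500),        # sex indicator
]).astype(float)
y = ((X[:, 3] == 0) & (X[:, 0] < 3)).astype(int)   # toy "survived" rule

clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X, y)

explainer = LimeTabularExplainer(
    X, feature_names=feature_names, class_names=["died", "survived"],
    mode="classification",
)
explanation = explainer.explain_instance(X[0], clf.predict_proba, num_features=4)
for feature, weight in explanation.as_list():       # local feature contributions
    print(f"{feature:>20s}  {weight:+.3f}")
```

For the separate feature-importance figure, one plausible source is the classifier's own `feature_importances_`; the requirement only fixes the output path, not the method.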
instances/50_Math_Problem_Solving_Transformer_DeepMindMath_DL.json ADDED
@@ -0,0 +1,112 @@
+ {
+ "name": "50_Math_Problem_Solving_Transformer_DeepMindMath_DL",
+ "query": "Hi! I need help with a project that uses a Transformer model to solve math problems from the DeepMind Mathematics dataset. Please load the dataset and preprocess it in `src/data_loader.py`. The preprocessing should parse and standardize the math expressions in a syntactically consistent way so the model can easily process them. Implement the Transformer in `src/model.py`. Also, tune the hyperparameters such as the learning rate and the batch size in `src/train.py`, and save the training loss curve to `results/figures/training_loss_curve.png`. Sample and save some Transformer-generated solutions in `results/sample_solutions.txt`. Using your model, create a simple interactive tool with Gradio or Streamlit in `src/interface.py` that can solve various user-given math problems. Lastly, generate a report on how the model performs with different types of problems, including model accuracy, error analysis, and future improvement suggestions. Save it as `results/metrics/model_report.md`. Thanks in advance!",
+ "tags": [
+ "Natural Language Processing"
+ ],
+ "requirements": [
+ {
+ "requirement_id": 0,
+ "prerequisites": [],
+ "criteria": "DeepMind Mathematics dataset is loaded in `src/data_loader.py`.",
+ "category": "Dataset or Environment",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 1,
+ "prerequisites": [
+ 0
+ ],
+ "criteria": "Data preprocessing is performed, including parsing and standardizing mathematical expressions in `src/data_loader.py`.",
+ "category": "Data preprocessing and postprocessing",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 2,
+ "prerequisites": [],
+ "criteria": "A \"Transformer\" model is implemented in `src/model.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 3,
+ "prerequisites": [
+ 0,
+ 1,
+ 2
+ ],
+ "criteria": "Hyperparameters such as learning rate and batch size are tuned in `src/train.py`.",
+ "category": "Machine Learning Method",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 4,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "Model training loss curve is saved as `results/figures/training_loss_curve.png`.",
+ "category": "Visualization",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 5,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "Some Transformer-generated solutions are saved in `results/sample_solutions.txt`.",
+ "category": "Other",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 6,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "criteria": "An interactive tool is created allowing users to input mathematical problems and receive solutions using \"Gradio\" or \"Streamlit\" in `src/interface.py`.",
+ "category": "Human Computer Interaction",
+ "satisfied": null
+ },
+ {
+ "requirement_id": 7,
+ "prerequisites": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "criteria": "A report is generated containing model accuracy, error analysis, and future improvement suggestions, and saved as `results/metrics/model_report.md`.",
+ "category": "Other",
+ "satisfied": null
+ }
+ ],
+ "preferences": [
+ {
+ "preference_id": 0,
+ "criteria": "The preprocessing step should ensure that the mathematical expressions are standardized in a way that makes them easily processed by the model.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 1,
+ "criteria": "The interactive tool should be capable of handling a wide variety of mathematical problem types.",
+ "satisfied": null
+ },
+ {
+ "preference_id": 2,
+ "criteria": "The report should provide insights into how the model handles different types of mathematical problems, identifying specific strengths and areas for improvement.",
+ "satisfied": null
+ }
+ ],
+ "is_kaggle_api_needed": false,
+ "is_training_needed": true,
+ "is_web_navigation_needed": false
+ }
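Finally, a compact sketch of the kind of character-level Transformer setup this instance describes, built on PyTorch's `nn.Transformer`. The vocabulary, special tokens, toy question/answer pair, and model sizes are all assumptions made for the example; a real `src/model.py` would train on the actual DeepMind Mathematics question/answer text.

```python
import torch
from torch import nn

# Illustrative character-level vocabulary with PAD/BOS/EOS special tokens.
PAD, BOS, EOS = 0, 1, 2
vocab = {ch: i + 3 for i, ch in enumerate("0123456789+-*/()=xy .?")}

def encode(text, add_bos=False):
    """Map a string to a 1-D tensor of token ids, dropping unknown characters."""
    ids = [vocab[ch] for ch in text if ch in vocab]
    return torch.tensor(([BOS] if add_bos else []) + ids + [EOS])

src = encode("2*(3+4)=?").unsqueeze(1)            # shape (src_len, batch=1)
tgt = encode("14", add_bos=True).unsqueeze(1)     # shape (tgt_len, batch=1)

model = nn.Transformer(d_model=64, nhead=4, num_encoder_layers=2,
                       num_decoder_layers=2, dim_feedforward=128)
embed = nn.Embedding(len(vocab) + 3, 64)
proj = nn.Linear(64, len(vocab) + 3)

# Teacher-forced forward pass: predict each next character of the answer.
tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(0) - 1)
out = model(embed(src), embed(tgt[:-1]), tgt_mask=tgt_mask)
logits = proj(out)                                # (tgt_len - 1, 1, vocab_size)
loss = nn.functional.cross_entropy(logits.flatten(0, 1), tgt[1:].flatten())
print("toy cross-entropy loss:", float(loss))
```

In line with requirement 1, the preprocessing in `src/data_loader.py` would additionally normalize whitespace and operator spelling so that syntactically equivalent expressions map to identical token sequences before encoding.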