From f902b99ee53aa21e48f26cff1ab171d6755a6500 Mon Sep 17 00:00:00 2001
From: Prathmesh Rajurkar <117176537+Prathmesh-rajurkar@users.noreply.github.com>
Date: Tue, 1 Oct 2024 22:19:14 +0530
Subject: [PATCH 1/3] Update core.py

Your issue #8 has been resolved in this commit.
---
 explainableai/core.py | 88 +++++++++++++++++++++++++++----------------
 1 file changed, 55 insertions(+), 33 deletions(-)

diff --git a/explainableai/core.py b/explainableai/core.py
index 50fd1ed..834d5a2 100644
--- a/explainableai/core.py
+++ b/explainableai/core.py
@@ -1,5 +1,5 @@
 # explainableai/core.py
-
+from typing import List
 import pandas as pd
 import numpy as np
 from sklearn.model_selection import train_test_split
@@ -141,54 +141,76 @@ def analyze(self):
         self.results = results
         return results
 
-    def generate_report(self, filename='xai_report.pdf'):
+    def generate_report(self, filename='xai_report.pdf', section: List): #section=[] , includes _all , model_comparison , model_performance , etc
         if self.results is None:
             raise ValueError("No analysis results available. Please run analyze() first.")
 
         report = ReportGenerator(filename)
         report.add_heading("Explainable AI Report")
 
-        report.add_heading("Model Comparison", level=2)
-        model_comparison_data = [["Model", "CV Score", "Test Score"]]
-        for model, scores in self.results['model_comparison'].items():
-            model_comparison_data.append([model, f"{scores['cv_score']:.4f}", f"{scores['test_score']:.4f}"])
-        report.add_table(model_comparison_data)
+        # Model Comparison
+        def model_comparison():
+            report.add_heading("Model Comparison", level=2)
+            model_comparison_data = [["Model", "CV Score", "Test Score"]]
+            for model, scores in self.results['model_comparison'].items():
+                model_comparison_data.append([model, f"{scores['cv_score']:.4f}", f"{scores['test_score']:.4f}"])
+            report.add_table(model_comparison_data)
 
         # Model Performance
-        report.add_heading("Model Performance", level=2)
-        for metric, value in self.results['model_performance'].items():
-            if isinstance(value, (int, float, np.float64)):
-                report.add_paragraph(f"**{metric}:** {value:.4f}")
-            else:
-                report.add_paragraph(f"**{metric}:**\n{value}")
+        def model_performance():
+            report.add_heading("Model Performance", level=2)
+            for metric, value in self.results['model_performance'].items():
+                if isinstance(value, (int, float, np.float64)):
+                    report.add_paragraph(f"**{metric}:** {value:.4f}")
+                else:
+                    report.add_paragraph(f"**{metric}:**\n{value}")
 
         # Feature Importance
-        report.add_heading("Feature Importance", level=2)
-        feature_importance_data = [["Feature", "Importance"]] + [[feature, f"{importance:.4f}"] for feature, importance in self.feature_importance.items()]
-        report.add_table(feature_importance_data)
+        def feature_importance():
+            report.add_heading("Feature Importance", level=2)
+            feature_importance_data = [["Feature", "Importance"]] + [[feature, f"{importance:.4f}"] for feature, importance in self.feature_importance.items()]
+            report.add_table(feature_importance_data)
 
         # Visualizations
-        report.add_heading("Visualizations", level=2)
-        report.add_image('feature_importance.png')
-        report.content.append(PageBreak())
-        report.add_image('partial_dependence.png')
-        report.content.append(PageBreak())
-        report.add_image('learning_curve.png')
-        report.content.append(PageBreak())
-        report.add_image('correlation_heatmap.png')
-        if self.is_classifier:
+        def visualization():
+            report.add_heading("Visualizations", level=2)
+            report.add_image('feature_importance.png')
             report.content.append(PageBreak())
-            report.add_image('roc_curve.png')
+            report.add_image('partial_dependence.png')
             report.content.append(PageBreak())
-            report.add_image('precision_recall_curve.png')
+            report.add_image('learning_curve.png')
+            report.content.append(PageBreak())
+            report.add_image('correlation_heatmap.png')
+            if self.is_classifier:
+                report.content.append(PageBreak())
+                report.add_image('roc_curve.png')
+                report.content.append(PageBreak())
+                report.add_image('precision_recall_curve.png')
 
         # LLM Explanation
-        report.add_heading("LLM Explanation", level=2)
-        report.add_llm_explanation(self.results['llm_explanation'])
-
-        report.generate()
-
+        def llm_explanation():
+            report.add_heading("LLM Explanation", level=2)
+            report.add_llm_explanation(self.results['llm_explanation'])
+
+        report.generate()
+
+        if("model_comparison" in section):
+            model_comparison()
+        if("model_performance" in section):
+            model_performance()
+        if("feature_importance" in section):
+            feature_importance()
+        if("visualization" in section):
+            visualization()
+        if("llm_explanation" in section):
+            llm_explanation()
+        if("_all" in section):
+            model_comparison()
+            model_performance()
+            feature_importance()
+            visualization()
+            llm_explanation()
     def predict(self, X):
         if self.model is None:
             raise ValueError("Model has not been fitted. Please run fit() first.")
@@ -306,4 +328,4 @@ def perform_eda(df):
     # Class distribution for the target variable (assuming last column is target)
     target_col = df.columns[-1]
     print(f"\nClass distribution for target variable '{target_col}':")
-    print(df[target_col].value_counts(normalize=True))
\ No newline at end of file
+    print(df[target_col].value_counts(normalize=True))
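
Note on the signature added above: in Python, a parameter without a default value may not follow a parameter that has one, so def generate_report(self, filename='xai_report.pdf', section: List) raises a SyntaxError as soon as core.py is imported; patch 2 below reorders the parameters and gives section a default. A minimal illustration of the rule, with placeholder function names that are not part of the patch:

    # SyntaxError: parameter without a default follows parameter with a default
    # def broken(filename='xai_report.pdf', section):
    #     ...

    # Valid: the non-default parameter comes first (or gets its own default)
    def fixed(section=None, filename='xai_report.pdf'):
        return section, filename
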
From 327caef5659f4b7bae12835342b54d1228e61a5a Mon Sep 17 00:00:00 2001
From: Prathmesh Rajurkar <117176537+Prathmesh-rajurkar@users.noreply.github.com>
Date: Tue, 1 Oct 2024 22:47:08 +0530
Subject: [PATCH 2/3] Update core.py

Resolved the bug where a parameter without a default followed a parameter with a default in generate_report().
---
 explainableai/core.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/explainableai/core.py b/explainableai/core.py
index 834d5a2..bb4d221 100644
--- a/explainableai/core.py
+++ b/explainableai/core.py
@@ -141,7 +141,7 @@ def analyze(self):
         self.results = results
         return results
 
-    def generate_report(self, filename='xai_report.pdf', section: List): #section=[] , includes _all , model_comparison , model_performance , etc
+    def generate_report(self, section: List = ["_all"] , filename='xai_report.pdf' ): #section=[] , includes _all , model_comparison , model_performance , etc
         if self.results is None:
             raise ValueError("No analysis results available. Please run analyze() first.")
 
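
With patch 2 applied, callers select report sections through the section list, which defaults to ["_all"]. A minimal usage sketch; the instance name xai is assumed for illustration (the wrapper class name is not shown in the diff), and analyze() must have been run first or generate_report() raises a ValueError:

    xai.analyze()

    # Default section=["_all"] renders every section of the report.
    xai.generate_report()

    # Only the listed sections are added to the report content.
    xai.generate_report(section=["model_comparison", "visualization"],
                        filename="xai_report.pdf")
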
From 7e33ddb91e70fce9954065862738d450feb04e6b Mon Sep 17 00:00:00 2001
From: Prathmesh Rajurkar <117176537+Prathmesh-rajurkar@users.noreply.github.com>
Date: Wed, 2 Oct 2024 15:05:04 +0530
Subject: [PATCH 3/3] Update core.py

Made changes in generate_report(): it now asks for permission before adding each section to the report ("xai_report").
---
 explainableai/core.py | 88 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 71 insertions(+), 17 deletions(-)

diff --git a/explainableai/core.py b/explainableai/core.py
index bb4d221..d651f3d 100644
--- a/explainableai/core.py
+++ b/explainableai/core.py
@@ -141,7 +141,7 @@ def analyze(self):
         self.results = results
         return results
 
-    def generate_report(self, section: List = ["_all"] , filename='xai_report.pdf' ): #section=[] , includes _all , model_comparison , model_performance , etc
+    def generate_report(self,filename='xai_report.pdf' ): #section=[] , includes _all , model_comparison , model_performance , etc
         if self.results is None:
             raise ValueError("No analysis results available. Please run analyze() first.")
 
@@ -195,22 +195,76 @@ def llm_explanation():
 
         report.generate()
 
-        if("model_comparison" in section):
-            model_comparison()
-        if("model_performance" in section):
-            model_performance()
-        if("feature_importance" in section):
-            feature_importance()
-        if("visualization" in section):
-            visualization()
-        if("llm_explanation" in section):
-            llm_explanation()
-        if("_all" in section):
-            model_comparison()
-            model_performance()
-            feature_importance()
-            visualization()
-            llm_explanation()
+        while True:
+            all_section_perm = input("Do you want all sections in the xia_report? (y/n) ").lower()
+
+            if all_section_perm in ["yes", "y"]:
+                model_comparison()
+                model_performance()
+                feature_importance()
+                visualization()
+                llm_explanation()
+                break
+
+            elif all_section_perm in ["no", "n"]:
+                while True:
+                    model_comp_perm = input("Do you want model_comparison in xia_report? (y/n) ").lower()
+                    if model_comp_perm in ["yes", "y"]:
+                        model_comparison()
+                        break
+                    elif model_comp_perm in ["no", "n"]:
+                        break
+                    else:
+                        print("Invalid input. Please enter 'y' or 'n'.")
+
+                while True:
+                    model_perf_perm = input("Do you want model_performance in xia_report? (y/n) ").lower()
+                    if model_perf_perm in ["yes", "y"]:
+                        model_performance()
+                        break
+                    elif model_perf_perm in ["no", "n"]:
+                        break
+                    else:
+                        print("Invalid input. Please enter 'y' or 'n'.")
+
+                while True:
+                    feature_imp_perm = input("Do you want feature_importance in xia_report? (y/n) ").lower()
+                    if feature_imp_perm in ["yes", "y"]:
+                        feature_importance()
+                        break
+                    elif feature_imp_perm in ["no", "n"]:
+                        break
+                    else:
+                        print("Invalid input. Please enter 'y' or 'n'.")
+
+                while True:
+                    visualization_perm = input("Do you want visualization in xia_report? (y/n) ").lower()
+                    if visualization_perm in ["yes", "y"]:
+                        visualization()
+                        break
+                    elif visualization_perm in ["no", "n"]:
+                        break
+                    else:
+                        print("Invalid input. Please enter 'y' or 'n'.")
+
+                while True:
+                    llm_expl_perm = input("Do you want llm_explanation in xia_report? (y/n) ").lower()
+                    if llm_expl_perm in ["yes", "y"]:
+                        llm_explanation()
+                        break
+                    elif llm_expl_perm in ["no", "n"]:
+                        break
+                    else:
+                        print("Invalid input. Please enter 'y' or 'n'.")
+                break
+
+            else:
+                print("Invalid input. Please enter 'y' or 'n' ")
+
+
+
+
+
     def predict(self, X):
         if self.model is None:
             raise ValueError("Model has not been fitted. Please run fit() first.")
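
In patch 3 every per-section permission prompt repeats the same yes/no loop. A condensed sketch of that pattern, using a hypothetical helper name ask_yes_no that does not appear in the patch:

    def ask_yes_no(prompt):
        # Keep asking until the user answers yes/y or no/n.
        while True:
            answer = input(prompt).lower()
            if answer in ["yes", "y"]:
                return True
            elif answer in ["no", "n"]:
                return False
            print("Invalid input. Please enter 'y' or 'n'.")

    # Equivalent to one of the inner loops in the hunk above:
    # if ask_yes_no("Do you want model_comparison in xia_report? (y/n) "):
    #     model_comparison()
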