Exploratory Data Analysis by aj-geddes/useful-ai-prompts
npx skills add https://github.com/aj-geddes/useful-ai-prompts --skill 'Exploratory Data Analysis'

Exploratory Data Analysis (EDA) is the critical first step in a data science project: it systematically examines a dataset to understand its characteristics, identify patterns, and assess data quality before formal modeling.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Load and explore data
df = pd.read_csv('customer_data.csv')
# Basic profiling
print(f"Shape: {df.shape}")
print(f"Data types:\n{df.dtypes}")
print(f"Missing values:\n{df.isnull().sum()}")
print(f"Duplicates: {df.duplicated().sum()}")
# Statistical summary
print(df.describe())
print(df.describe(include='object'))
# Distribution analysis - numerical columns
fig, axes = plt.subplots(2, 2, figsize=(12, 8))
df['age'].hist(bins=30, ax=axes[0, 0])
axes[0, 0].set_title('Age Distribution')
df['income'].hist(bins=30, ax=axes[0, 1])
axes[0, 1].set_title('Income Distribution')
# Box plots for outlier detection
df.boxplot(column='age', by='region', ax=axes[1, 0])
axes[1, 0].set_title('Age by Region')
# Categorical analysis
df['category'].value_counts().plot(kind='bar', ax=axes[1, 1])
axes[1, 1].set_title('Category Distribution')
plt.tight_layout()
plt.show()
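The column names above (age, income, region, category) assume the example customer_data.csv schema; a schema-agnostic sketch that histograms every numeric column instead:
# Schema-agnostic alternative: histogram every numeric column
df.select_dtypes(include=[np.number]).hist(bins=30, figsize=(12, 8))
plt.tight_layout()
plt.show()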
# Correlation analysis
numeric_df = df.select_dtypes(include=[np.number])
correlation_matrix = numeric_df.corr()
plt.figure(figsize=(10, 8))
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', center=0)
plt.title('Correlation Matrix')
plt.show()
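Beyond eyeballing the heatmap, strongly related pairs can be flagged programmatically; a minimal sketch, where the |r| > 0.8 multicollinearity cutoff is an illustrative choice, not a rule from the source:
# Flag highly correlated feature pairs (threshold is illustrative)
threshold = 0.8
mask = np.triu(np.ones(correlation_matrix.shape, dtype=bool), k=1)  # upper triangle only
high_corr = correlation_matrix.where(mask).stack()
high_corr = high_corr[high_corr.abs() > threshold]
print(f"Feature pairs with |r| > {threshold}:")
print(high_corr.sort_values(key=abs, ascending=False))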
# Multivariate relationships
sns.pairplot(df[['age', 'income', 'education_years']], diag_kind='hist')
plt.show()
# Skewness and kurtosis
print("\nSkewness:")
print(numeric_df.skew())
print("\nKurtosis:")
print(numeric_df.kurtosis())
# Percentile analysis
print("\nPercentiles for Age:")
print(df['age'].quantile([0.25, 0.5, 0.75, 0.95, 0.99]))
# Missing data patterns
missing_pct = (df.isnull().sum() / len(df) * 100)
print(missing_pct[missing_pct > 0].sort_values(ascending=False))
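A quick way to see these gaps at a glance is to plot them; a small sketch reusing the missing_pct series computed above:
# Visualize missing-data percentages per column
if (missing_pct > 0).any():
    missing_pct[missing_pct > 0].sort_values().plot(kind='barh', figsize=(8, 4))
    plt.xlabel('% Missing')
    plt.title('Missing Data by Column')
    plt.tight_layout()
    plt.show()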
# Value count analysis
print("\nCustomer Types Distribution:")
print(df['customer_type'].value_counts(normalize=True))
# Advanced EDA: Groupby analysis
print("\nGroupBy Analysis:")
print(df.groupby('region')[['age', 'income']].agg(['mean', 'median', 'std']))
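Categorical-to-categorical relationships are not captured by .corr(); a cross-tabulation sketch, assuming the region and customer_type columns from the example schema:
# Cross-tab of two categorical columns (column names assume the example schema)
print(pd.crosstab(df['region'], df['customer_type'], normalize='index').round(2))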
# Correlation with target variable
if 'target' in numeric_df.columns:  # use numeric_df so .corr() doesn't fail on non-numeric columns
    target_corr = numeric_df.corr()['target'].sort_values(ascending=False)
    print("\nFeature Correlation with Target:")
    print(target_corr)
# Data type breakdown
print("\nData Type Summary:")
print(df.dtypes.value_counts())
# Unique value count
print("\nUnique Value Counts:")
print(df.nunique().sort_values(ascending=False))
# Variance analysis
print("\nVariance per Feature:")
numeric_cols = df.select_dtypes(include=[np.number]).columns
for col in numeric_cols:
    variance = df[col].var()
    print(f"  {col}: {variance:.2f}")
# Distribution patterns
for col in numeric_cols:
    skew = df[col].skew()
    kurt = df[col].kurtosis()
    print(f"{col} - Skew: {skew:.2f}, Kurtosis: {kurt:.2f}")
# Bivariate analysis
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
df.groupby('region')['income'].mean().plot(kind='bar', ax=axes[0])
axes[0].set_title('Average Income by Region')
df.groupby('category')['age'].mean().plot(kind='bar', ax=axes[1])
axes[1].set_title('Average Age by Category')
plt.tight_layout()
plt.show()
# Summary statistics profile
print("\nComprehensive Data Profile:")
profile = {
    'Variable': df.columns,
    'Type': df.dtypes,
    'Non-Null Count': df.count(),
    'Null Count': df.isnull().sum(),
    'Unique Values': df.nunique(),
}
profile_df = pd.DataFrame(profile)
print(profile_df)
# Interaction analysis: pairwise correlation strength
import itertools
numeric_cols = df.select_dtypes(include=[np.number]).columns
interaction_strengths = []
for col1, col2 in itertools.combinations(numeric_cols[:5], 2):
    interaction_score = abs(df[col1].corr(df[col2]))
    interaction_strengths.append({
        'Pair': f"{col1} × {col2}",
        'Correlation': interaction_score,
    })
interaction_df = pd.DataFrame(interaction_strengths).sort_values('Correlation', ascending=False)
print("\nTop Interactions:")
print(interaction_df.head())
# Outlier summary using the 1.5 * IQR rule
for col in numeric_cols:
    Q1, Q3 = df[col].quantile([0.25, 0.75])
    IQR = Q3 - Q1
    outliers = df[(df[col] < Q1 - 1.5 * IQR) | (df[col] > Q3 + 1.5 * IQR)]
    if len(outliers) > 0:
        print(f"\n{col}: {len(outliers)} outliers detected ({len(outliers) / len(df) * 100:.1f}%)")
# Generate automated insights
print("\n" + "=" * 60)
print("AUTOMATED DATA INSIGHTS")
print("=" * 60)
for col in numeric_cols:
    skewness = df[col].skew()
    mean_val = df[col].mean()
    median_val = df[col].median()
    if abs(skewness) > 1:
        direction = "right" if skewness > 0 else "left"
        print(f"{col}: Highly {direction}-skewed distribution")
    if abs(mean_val - median_val) > 0.1 * abs(median_val):
        print(f"{col}: Mean and median differ by more than 10% of the median")
print("=" * 60)