[Python × LightGBM Tutorial] Part 5: Cross-Validation and Ensemble Learning

Introduction

Cross-validation is essential for correctly assessing a machine learning model's generalization performance and guarding against overfitting. Combining multiple models through ensemble learning can then push accuracy and stability even further. In this installment, we take a detailed, hands-on look at cross-validation and ensemble learning with LightGBM.

Preparing the Dataset

This time, our subject is credit risk assessment (loan screening), a binary classification problem common in the financial industry.

import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score, train_test_split
from sklearn.metrics import roc_auc_score, accuracy_score, precision_recall_curve
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')

# Generate the data
np.random.seed(42)
n_samples = 10000

# Customer attribute data
data = pd.DataFrame({
    'age': np.random.randint(20, 70, n_samples),
    'annual_income': np.random.lognormal(10.5, 0.6, n_samples),  # annual income (log-normal)
    'employment_years': np.random.exponential(5, n_samples),  # years of employment
    'num_credit_cards': np.random.poisson(2, n_samples),  # number of credit cards
    'num_loans': np.random.poisson(1, n_samples),  # number of loans
    'credit_history_length': np.random.uniform(0, 30, n_samples),  # credit history length (years)
    'credit_utilization': np.random.beta(2, 5, n_samples),  # credit utilization ratio
    'num_late_payments': np.random.negative_binomial(1, 0.3, n_samples),  # number of late payments
    'debt_to_income': np.random.beta(2, 8, n_samples),  # debt-to-income ratio
    'has_mortgage': np.random.choice([0, 1], n_samples, p=[0.6, 0.4]),  # has a mortgage
    'has_car_loan': np.random.choice([0, 1], n_samples, p=[0.7, 0.3]),  # has a car loan
    'education': np.random.choice(['High School', 'Bachelor', 'Master', 'PhD'], n_samples,
                                 p=[0.3, 0.4, 0.25, 0.05]),
    'marital_status': np.random.choice(['Single', 'Married', 'Divorced'], n_samples,
                                      p=[0.3, 0.5, 0.2]),
    'employment_type': np.random.choice(['Full-time', 'Part-time', 'Self-employed', 'Unemployed'],
                                       n_samples, p=[0.6, 0.15, 0.2, 0.05])
})

# Generate the default (delinquency) flag
# Compute a probability based on risk factors
default_prob = (
    0.1 +  # base probability
    0.3 * (data['debt_to_income'] > 0.4) +
    0.2 * (data['num_late_payments'] > 2) +
    0.15 * (data['credit_utilization'] > 0.8) +
    0.1 * (data['employment_type'] == 'Unemployed') -
    0.1 * (data['annual_income'] > 100000) -
    0.05 * (data['credit_history_length'] > 10)
)
default_prob = np.clip(default_prob, 0, 1)
data['default'] = np.random.binomial(1, default_prob)

print(f"Dataset size: {data.shape}")
print(f"Default rate: {data['default'].mean():.2%}")
print("\nClass balance:")
print(data['default'].value_counts())

Basics of Cross-Validation

1. K-Fold Cross-Validation

from sklearn.preprocessing import LabelEncoder

# Encode the categorical variables
categorical_cols = ['education', 'marital_status', 'employment_type']
label_encoders = {}

for col in categorical_cols:
    le = LabelEncoder()
    data[col + '_encoded'] = le.fit_transform(data[col])
    label_encoders[col] = le

# Prepare the features and target
feature_cols = [col for col in data.columns if col not in ['default'] + categorical_cols]
X = data[feature_cols]
y = data['default']

# Basic K-Fold cross-validation
def basic_kfold_cv(X, y, n_splits=5):
    """Basic K-Fold cross-validation"""
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
    scores = []
    
    for fold, (train_idx, val_idx) in enumerate(kf.split(X)):
        print(f"Fold {fold + 1}/{n_splits}")
        
        # Split the data
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]
        
        # Train the LightGBM model
        train_data = lgb.Dataset(X_train, label=y_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)
        
        params = {
            'objective': 'binary',
            'metric': 'auc',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'verbose': -1,
            'random_state': 42
        }
        
        model = lgb.train(
            params,
            train_data,
            valid_sets=[val_data],
            num_boost_round=300,
            callbacks=[lgb.early_stopping(50), lgb.log_evaluation(0)]
        )
        
        # Predict and evaluate
        y_pred = model.predict(X_val, num_iteration=model.best_iteration)
        score = roc_auc_score(y_val, y_pred)
        scores.append(score)
        print(f"  AUC: {score:.4f}")
    
    print(f"\nMean AUC: {np.mean(scores):.4f} (+/- {np.std(scores):.4f})")
    return scores

# Run
scores = basic_kfold_cv(X, y)

2. Stratified K-Fold Cross-Validation

With imbalanced classes, use stratified sampling so that every fold preserves the overall class ratio.

def stratified_kfold_cv(X, y, n_splits=5):
    """Stratified K-Fold cross-validation"""
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
    scores = []
    models = []  # store each fold's model
    
    for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
        print(f"Stratified Fold {fold + 1}/{n_splits}")
        
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]
        
        # Check the class balance
        print(f"  Training default rate: {y_train.mean():.2%}")
        print(f"  Validation default rate: {y_val.mean():.2%}")
        
        train_data = lgb.Dataset(X_train, label=y_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)
        
        params = {
            'objective': 'binary',
            'metric': 'auc',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'verbose': -1,
            'random_state': 42 + fold,  # different seed per fold
            'is_unbalance': True  # account for class imbalance
        }
        
        model = lgb.train(
            params,
            train_data,
            valid_sets=[val_data],
            num_boost_round=300,
            callbacks=[lgb.early_stopping(50), lgb.log_evaluation(0)]
        )
        
        models.append(model)
        
        y_pred = model.predict(X_val, num_iteration=model.best_iteration)
        score = roc_auc_score(y_val, y_pred)
        scores.append(score)
        print(f"  AUC: {score:.4f}")
    
    print(f"\nStratified K-Fold mean AUC: {np.mean(scores):.4f} (+/- {np.std(scores):.4f})")
    return scores, models

# Run
stratified_scores, cv_models = stratified_kfold_cv(X, y)

3. Time-Series Cross-Validation

For time-series data, use a cross-validation scheme that respects temporal order: each fold trains only on the past and validates on the future.

from sklearn.model_selection import TimeSeriesSplit

def time_series_cv(X, y, n_splits=5):
    """Time-series cross-validation (assuming a pseudo time order)"""
    # TimeSeriesSplit splits by row order, so the data must already be
    # sorted chronologically (here we simply treat the row index as time)
    tscv = TimeSeriesSplit(n_splits=n_splits)
    scores = []
    
    plt.figure(figsize=(12, 6))
    
    for fold, (train_idx, val_idx) in enumerate(tscv.split(X)):
        print(f"Time Series Fold {fold + 1}/{n_splits}")
        print(f"  Training: {len(train_idx)} samples")
        print(f"  Validation: {len(val_idx)} samples")
        
        # Visualize the split
        plt.subplot(n_splits, 1, fold + 1)
        plt.scatter(train_idx, [fold] * len(train_idx), c='blue', s=1, label='Train')
        plt.scatter(val_idx, [fold] * len(val_idx), c='red', s=1, label='Val')
        plt.ylabel(f'Fold {fold + 1}')
        if fold == 0:
            plt.legend()
        
        # Model training and evaluation (omitted here; see the sketch after this block)
    
    plt.xlabel('Sample Index')
    plt.tight_layout()
    plt.show()

# Run (visualization only)
time_series_cv(X, y, n_splits=5)
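
For completeness, here is one way the omitted training step could look. This is a minimal sketch using the scikit-learn wrapper; note that our synthetic data has no real time order, so the resulting scores are illustrative only.

def time_series_cv_train(X, y, n_splits=5):
    """Train and evaluate LightGBM inside each TimeSeriesSplit fold."""
    tscv = TimeSeriesSplit(n_splits=n_splits)
    scores = []

    for train_idx, val_idx in tscv.split(X):
        # Train strictly on the past, validate on the future
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]

        model = lgb.LGBMClassifier(n_estimators=300, learning_rate=0.05,
                                   random_state=42)
        model.fit(X_train, y_train,
                  eval_set=[(X_val, y_val)],
                  callbacks=[lgb.early_stopping(50), lgb.log_evaluation(0)])

        y_pred = model.predict_proba(X_val)[:, 1]
        scores.append(roc_auc_score(y_val, y_pred))

    print(f"Time-series CV mean AUC: {np.mean(scores):.4f}")
    return scores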

Advanced Cross-Validation Techniques

1. Nested Cross-Validation

def nested_cv(X, y, param_grid, inner_cv=3, outer_cv=5):
    """Nested cross-validation with hyperparameter tuning"""
    outer_scores = []
    outer_kf = StratifiedKFold(n_splits=outer_cv, shuffle=True, random_state=42)
    
    for outer_fold, (train_idx, test_idx) in enumerate(outer_kf.split(X, y)):
        print(f"\nOuter Fold {outer_fold + 1}/{outer_cv}")
        
        X_train_outer, X_test_outer = X.iloc[train_idx], X.iloc[test_idx]
        y_train_outer, y_test_outer = y.iloc[train_idx], y.iloc[test_idx]
        
        # Select hyperparameters with the inner cross-validation
        best_score = -np.inf
        best_params = None
        
        inner_kf = StratifiedKFold(n_splits=inner_cv, shuffle=True, random_state=42)
        
        for params in param_grid:
            inner_scores = []
            
            for train_idx_inner, val_idx_inner in inner_kf.split(X_train_outer, y_train_outer):
                X_train_inner = X_train_outer.iloc[train_idx_inner]
                X_val_inner = X_train_outer.iloc[val_idx_inner]
                y_train_inner = y_train_outer.iloc[train_idx_inner]
                y_val_inner = y_train_outer.iloc[val_idx_inner]
                
                train_data = lgb.Dataset(X_train_inner, label=y_train_inner)
                val_data = lgb.Dataset(X_val_inner, label=y_val_inner)
                
                model = lgb.train(
                    params,
                    train_data,
                    valid_sets=[val_data],
                    num_boost_round=200,
                    callbacks=[lgb.early_stopping(30), lgb.log_evaluation(0)]
                )
                
                y_pred = model.predict(X_val_inner, num_iteration=model.best_iteration)
                score = roc_auc_score(y_val_inner, y_pred)
                inner_scores.append(score)
            
            mean_inner_score = np.mean(inner_scores)
            if mean_inner_score > best_score:
                best_score = mean_inner_score
                best_params = params
        
        print(f"  Best parameters: {best_params}")
        print(f"  Inner CV mean score: {best_score:.4f}")
        
        # Evaluate on the outer test data with the best parameters
        train_data = lgb.Dataset(X_train_outer, label=y_train_outer)
        final_model = lgb.train(
            best_params,
            train_data,
            num_boost_round=300,
            callbacks=[lgb.log_evaluation(0)]
        )
        
        y_pred_outer = final_model.predict(X_test_outer)
        outer_score = roc_auc_score(y_test_outer, y_pred_outer)
        outer_scores.append(outer_score)
        print(f"  Outer test score: {outer_score:.4f}")
    
    print(f"\nFinal score: {np.mean(outer_scores):.4f} (+/- {np.std(outer_scores):.4f})")
    return outer_scores

# Define the parameter grid
param_grid = [
    {
        'objective': 'binary',
        'metric': 'auc',
        'num_leaves': 31,
        'learning_rate': 0.05,
        'feature_fraction': 0.9,
        'verbose': -1
    },
    {
        'objective': 'binary',
        'metric': 'auc',
        'num_leaves': 50,
        'learning_rate': 0.1,
        'feature_fraction': 0.8,
        'verbose': -1
    }
]

# Run (abridged version; uncomment to execute)
# nested_scores = nested_cv(X, y, param_grid, inner_cv=2, outer_cv=3)

2. Out-of-Fold Predictions

def get_oof_predictions(X, y, n_splits=5):
    """Obtain out-of-fold (OOF) predictions"""
    kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
    
    # Array to hold the OOF predictions
    oof_predictions = np.zeros(len(X))
    feature_importance_list = []
    
    params = {
        'objective': 'binary',
        'metric': 'auc',
        'boosting_type': 'gbdt',
        'num_leaves': 31,
        'learning_rate': 0.05,
        'feature_fraction': 0.9,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'verbose': -1,
        'random_state': 42
    }
    
    for fold, (train_idx, val_idx) in enumerate(kf.split(X, y)):
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]
        
        train_data = lgb.Dataset(X_train, label=y_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)
        
        model = lgb.train(
            params,
            train_data,
            valid_sets=[val_data],
            num_boost_round=300,
            callbacks=[lgb.early_stopping(50), lgb.log_evaluation(0)]
        )
        
        # Store the OOF predictions
        oof_predictions[val_idx] = model.predict(X_val, num_iteration=model.best_iteration)
        
        # Store the feature importance
        importance = pd.DataFrame({
            'feature': X.columns,
            'importance': model.feature_importance(importance_type='gain'),
            'fold': fold
        })
        feature_importance_list.append(importance)
    
    # Overall OOF score
    oof_score = roc_auc_score(y, oof_predictions)
    print(f"OOF AUC Score: {oof_score:.4f}")
    
    # Aggregate the feature importances
    feature_importance = pd.concat(feature_importance_list)
    feature_importance_agg = feature_importance.groupby('feature')['importance'].agg(['mean', 'std'])
    feature_importance_agg = feature_importance_agg.sort_values('mean', ascending=False)
    
    # Visualize
    plt.figure(figsize=(10, 8))
    top_features = feature_importance_agg.head(15)
    plt.barh(range(len(top_features)), top_features['mean'], xerr=top_features['std'])
    plt.yticks(range(len(top_features)), top_features.index)
    plt.xlabel('Importance')
    plt.title('Feature importance (mean ± std)')
    plt.gca().invert_yaxis()
    plt.tight_layout()
    plt.show()
    
    return oof_predictions, feature_importance_agg

# Run
oof_preds, feature_importance = get_oof_predictions(X, y)

Ensemble Learning

1. Simple Averaging Ensemble

def simple_averaging_ensemble(models, X_test):
    """Simple average of multiple models' predictions"""
    predictions = []
    
    for i, model in enumerate(models):
        pred = model.predict(X_test, num_iteration=model.best_iteration)
        predictions.append(pred)
        print(f"Model {i+1} prediction complete")
    
    # Simple average
    ensemble_pred = np.mean(predictions, axis=0)
    
    # Check the correlation between the models' predictions
    pred_df = pd.DataFrame(predictions).T
    pred_df.columns = [f'Model_{i+1}' for i in range(len(models))]
    
    plt.figure(figsize=(8, 6))
    sns.heatmap(pred_df.corr(), annot=True, cmap='coolwarm', center=0.5)
    plt.title('Prediction correlation between models')
    plt.tight_layout()
    plt.show()
    
    return ensemble_pred

# Prepare the test data. To avoid leakage, split off the hold-out set first
# and retrain the fold models on the training portion only (the cv_models
# trained above saw every row of X during cross-validation)
X_train_full, X_test_full, y_train_full, y_test_full = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)
_, cv_models = stratified_kfold_cv(X_train_full, y_train_full)

# Ensemble prediction
ensemble_predictions = simple_averaging_ensemble(cv_models, X_test_full)
ensemble_score = roc_auc_score(y_test_full, ensemble_predictions)
print(f"\nEnsemble AUC: {ensemble_score:.4f}")

# Compare with the individual models
for i, model in enumerate(cv_models):
    single_pred = model.predict(X_test_full, num_iteration=model.best_iteration)
    single_score = roc_auc_score(y_test_full, single_pred)
    print(f"Model {i+1} AUC: {single_score:.4f}")

2. Weighted Averaging Ensemble

def weighted_averaging_ensemble(models, X_test, X_val, y_val):
    """Weighted average based on validation performance"""
    weights = []
    predictions_test = []
    
    # Compute each model's validation score
    for model in models:
        pred_val = model.predict(X_val, num_iteration=model.best_iteration)
        score = roc_auc_score(y_val, pred_val)
        weights.append(score)
        
        pred_test = model.predict(X_test, num_iteration=model.best_iteration)
        predictions_test.append(pred_test)
    
    # Normalize the weights
    weights = np.array(weights)
    weights = weights / weights.sum()
    
    print("Model weights:", weights)
    
    # Weighted average
    weighted_pred = np.average(predictions_test, axis=0, weights=weights)
    
    return weighted_pred

# Prepare a validation set for computing the weights
X_val_ensemble, X_test_ensemble, y_val_ensemble, y_test_ensemble = train_test_split(
    X_test_full, y_test_full, test_size=0.5, random_state=42, stratify=y_test_full
)

# Weighted ensemble
weighted_predictions = weighted_averaging_ensemble(
    cv_models, X_test_ensemble, X_val_ensemble, y_val_ensemble
)
weighted_score = roc_auc_score(y_test_ensemble, weighted_predictions)
print(f"\nWeighted ensemble AUC: {weighted_score:.4f}")
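
One caveat: because the per-model AUC values all fall in a narrow range, normalizing them directly produces nearly uniform weights. An alternative is to search for the weights that directly maximize validation AUC. Below is a minimal sketch using scipy.optimize.minimize; the helper name optimize_ensemble_weights is our own, not a library function.

from scipy.optimize import minimize

def optimize_ensemble_weights(predictions_val, y_val):
    """Search for non-negative weights (summing to 1) that maximize validation AUC."""
    n_models = len(predictions_val)

    def neg_auc(w):
        w = np.abs(w) / np.abs(w).sum()  # enforce non-negative, normalized weights
        blended = np.average(predictions_val, axis=0, weights=w)
        return -roc_auc_score(y_val, blended)

    init = np.ones(n_models) / n_models  # start from uniform weights
    result = minimize(neg_auc, init, method='Nelder-Mead')
    best_weights = np.abs(result.x) / np.abs(result.x).sum()
    print("Optimized weights:", best_weights)
    return best_weights

# Usage (with the fold models and validation split from above):
# val_preds = [m.predict(X_val_ensemble, num_iteration=m.best_iteration) for m in cv_models]
# opt_weights = optimize_ensemble_weights(val_preds, y_val_ensemble)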

3. Stacking

from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier

def stacking_ensemble(X_train, y_train, X_test, base_models=None):
    """Stacking ensemble"""
    if base_models is None:
        base_models = {
            'lgb': lgb.LGBMClassifier(n_estimators=100, random_state=42),
            'rf': RandomForestClassifier(n_estimators=100, random_state=42),
            'xgb': XGBClassifier(n_estimators=100, random_state=42)
        }
    
    # Level 1: train the base models and build OOF predictions
    train_meta_features = []
    test_meta_features = []
    
    kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    
    for name, model in base_models.items():
        print(f"\nTraining {name} model...")
        
        # OOF predictions
        oof_pred = np.zeros(len(X_train))
        test_pred = np.zeros(len(X_test))
        
        for train_idx, val_idx in kf.split(X_train, y_train):
            X_fold_train = X_train.iloc[train_idx]
            y_fold_train = y_train.iloc[train_idx]
            X_fold_val = X_train.iloc[val_idx]
            
            # Fit a fresh clone of the model on this fold
            model_clone = clone(model)
            model_clone.fit(X_fold_train, y_fold_train)
            
            # OOF predictions (test predictions are averaged across the folds)
            if hasattr(model_clone, 'predict_proba'):
                oof_pred[val_idx] = model_clone.predict_proba(X_fold_val)[:, 1]
                test_pred += model_clone.predict_proba(X_test)[:, 1] / kf.n_splits
            else:
                oof_pred[val_idx] = model_clone.predict(X_fold_val)
                test_pred += model_clone.predict(X_test) / kf.n_splits
        
        train_meta_features.append(oof_pred)
        test_meta_features.append(test_pred)
        
        # OOF score
        oof_score = roc_auc_score(y_train, oof_pred)
        print(f"{name} OOF AUC: {oof_score:.4f}")
    
    # Build the meta-features
    train_meta = np.column_stack(train_meta_features)
    test_meta = np.column_stack(test_meta_features)
    
    # Level 2: train the meta-model
    print("\nTraining the meta-model...")
    meta_model = LogisticRegression(random_state=42)
    meta_model.fit(train_meta, y_train)
    
    # Final prediction
    final_pred = meta_model.predict_proba(test_meta)[:, 1]
    
    # Visualize the correlation of the meta-features
    meta_df = pd.DataFrame(train_meta, columns=list(base_models.keys()))
    plt.figure(figsize=(8, 6))
    sns.heatmap(meta_df.corr(), annot=True, cmap='coolwarm')
    plt.title('Prediction correlation of the base models')
    plt.tight_layout()
    plt.show()
    
    return final_pred, meta_model

# Run stacking
stacking_pred, meta_model = stacking_ensemble(X_train_full, y_train_full, X_test_full)
stacking_score = roc_auc_score(y_test_full, stacking_pred)
print(f"\nStacking ensemble AUC: {stacking_score:.4f}")
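
Incidentally, scikit-learn packages this whole pattern as StackingClassifier, which builds the OOF meta-features internally. A minimal sketch, with illustrative estimator settings:

from sklearn.ensemble import StackingClassifier

stack = StackingClassifier(
    estimators=[
        ('lgb', lgb.LGBMClassifier(n_estimators=100, random_state=42)),
        ('rf', RandomForestClassifier(n_estimators=100, random_state=42)),
    ],
    final_estimator=LogisticRegression(random_state=42),
    cv=5,  # OOF predictions for the meta-model are generated internally
    stack_method='predict_proba'
)
stack.fit(X_train_full, y_train_full)
sk_stack_pred = stack.predict_proba(X_test_full)[:, 1]
print(f"StackingClassifier AUC: {roc_auc_score(y_test_full, sk_stack_pred):.4f}")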

4. Blending

def blending_ensemble(X_train, y_train, X_test, blend_size=0.2):
    """Blending ensemble (uses a hold-out validation set)"""
    # Carve out a hold-out set for blending
    X_train_blend, X_blend, y_train_blend, y_blend = train_test_split(
        X_train, y_train, test_size=blend_size, random_state=42, stratify=y_train
    )

    print(f"Blending set size: {X_blend.shape}")
    print(f"Training set size: {X_train_blend.shape}")
    
    # Define the base models
    base_models = {
        'lgb1': lgb.LGBMClassifier(n_estimators=100, num_leaves=31, random_state=42),
        'lgb2': lgb.LGBMClassifier(n_estimators=100, num_leaves=50, random_state=43),
        'lgb3': lgb.LGBMClassifier(n_estimators=100, learning_rate=0.1, random_state=44)
    }
    
    # Train the base models and predict on the blending set
    blend_features = []
    test_features = []
    
    for name, model in base_models.items():
        print(f"\nTraining {name}...")
        
        # Fit
        model.fit(X_train_blend, y_train_blend)
        
        # Predictions for the blending set
        blend_pred = model.predict_proba(X_blend)[:, 1]
        blend_features.append(blend_pred)
        
        # Predictions for the test set
        test_pred = model.predict_proba(X_test)[:, 1]
        test_features.append(test_pred)
        
        # Evaluate
        score = roc_auc_score(y_blend, blend_pred)
        print(f"{name} Blend AUC: {score:.4f}")
    
    # Build the blend meta-features
    X_blend_meta = np.column_stack(blend_features)
    X_test_meta = np.column_stack(test_features)
    
    # Train the blender
    print("\nTraining the blender...")
    blender = LogisticRegression(random_state=42)
    blender.fit(X_blend_meta, y_blend)
    
    # Final prediction
    final_pred = blender.predict_proba(X_test_meta)[:, 1]
    
    return final_pred

# Run blending
blending_pred = blending_ensemble(X_train_full, y_train_full, X_test_full)
blending_score = roc_auc_score(y_test_full, blending_pred)
print(f"\nBlending ensemble AUC: {blending_score:.4f}")

Evaluating and Comparing the Ensembles

# Compare the results of all methods
# (note: the weighted average is scored on only half of the test set)
results = pd.DataFrame({
    'Method': ['Single model (mean)', 'Simple average', 'Weighted average', 'Stacking', 'Blending'],
    'AUC': [
        np.mean([roc_auc_score(y_test_full, model.predict(X_test_full, num_iteration=model.best_iteration)) 
                for model in cv_models]),
        ensemble_score,
        weighted_score,
        stacking_score,
        blending_score
    ]
})

# Visualize
plt.figure(figsize=(10, 6))
plt.bar(results['Method'], results['AUC'])
plt.ylim(min(results['AUC']) * 0.95, max(results['AUC']) * 1.02)
plt.ylabel('AUC')
plt.title('Comparison of ensemble methods')
for i, v in enumerate(results['AUC']):
    plt.text(i, v + 0.002, f'{v:.4f}', ha='center')
plt.tight_layout()
plt.show()

print(results)

Practical Tips

1. Checking Cross-Validation Stability

def cv_stability_check(X, y, n_repeats=5):
    """Check the stability of cross-validation results"""
    all_scores = []
    
    for repeat in range(n_repeats):
        kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42 + repeat)
        scores = []
        
        for train_idx, val_idx in kf.split(X, y):
            X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
            y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]
            
            model = lgb.LGBMClassifier(n_estimators=100, random_state=42)
            model.fit(X_train, y_train)
            
            y_pred = model.predict_proba(X_val)[:, 1]
            score = roc_auc_score(y_val, y_pred)
            scores.append(score)
        
        all_scores.append(scores)
    
    # Visualize the results
    plt.figure(figsize=(10, 6))
    for i, scores in enumerate(all_scores):
        plt.plot(scores, marker='o', label=f'Repeat {i+1}')
    plt.xlabel('Fold')
    plt.ylabel('AUC')
    plt.title('Cross-validation stability')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()
    
    # Summary statistics
    all_scores_flat = [score for scores in all_scores for score in scores]
    print(f"Overall mean: {np.mean(all_scores_flat):.4f}")
    print(f"Overall std: {np.std(all_scores_flat):.4f}")
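
The function above is only defined, so here is how a run might look. Note that it trains 25 quick models (5 repeats of 5-fold CV), so it takes a moment:

# Run: repeat 5-fold CV five times with different shuffles
cv_stability_check(X, y, n_repeats=5)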

2. Ensemble Diversity Analysis

def ensemble_diversity_analysis(predictions_list):
    """Analyze the diversity of the ensemble members"""
    n_models = len(predictions_list)
    
    # Pairwise correlations
    correlations = []
    for i in range(n_models):
        for j in range(i+1, n_models):
            corr = np.corrcoef(predictions_list[i], predictions_list[j])[0, 1]
            correlations.append(corr)
    
    print(f"Mean correlation: {np.mean(correlations):.4f}")
    print(f"Min correlation: {np.min(correlations):.4f}")
    print(f"Max correlation: {np.max(correlations):.4f}")
    
    # Higher diversity generally means a better ensemble
    diversity_score = 1 - np.mean(correlations)
    print(f"\nDiversity score: {diversity_score:.4f}")
    
    return diversity_score
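
As with the stability check, here is one way to call it, reusing the fold models from earlier (this assumes cv_models and X_test_full are still in scope):

# Collect each fold model's test predictions and measure their diversity
fold_predictions = [
    model.predict(X_test_full, num_iteration=model.best_iteration)
    for model in cv_models
]
diversity = ensemble_diversity_analysis(fold_predictions)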

Summary

In this installment, we took a detailed look at cross-validation and ensemble learning with LightGBM. The key points:

  1. Types of cross-validation: K-Fold, Stratified K-Fold, time-series cross-validation
  2. Advanced cross-validation: nested CV, out-of-fold predictions
  3. Ensemble methods: simple averaging, weighted averaging, stacking, blending
  4. Model diversity: diverse ensemble members are what make an ensemble work
  5. Stability checks: repeat cross-validation to confirm the results are reproducible

In the final installment next time, we will cover practical applications and best practices, with techniques you can use in real projects.

Exercises

  1. Implement cross-validation with GroupKFold so that rows from the same group (e.g. the same customer ID) never end up in both training and validation folds (a starter sketch follows this list)
  2. Build an ensemble that combines different model families (decision trees, SVMs, etc.)
  3. Implement ensemble weight optimization using Bayesian optimization
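
As a starting point for exercise 1, here is a minimal GroupKFold skeleton. The groups array is a hypothetical stand-in for a customer_id column, which our synthetic data does not actually have:

from sklearn.model_selection import GroupKFold

# Hypothetical group labels: pretend several rows belong to each customer
groups = np.random.randint(0, 2000, len(X))

gkf = GroupKFold(n_splits=5)
for fold, (train_idx, val_idx) in enumerate(gkf.split(X, y, groups=groups)):
    # No group ever appears in both the training and validation indices
    assert set(groups[train_idx]).isdisjoint(set(groups[val_idx]))
    print(f"Fold {fold + 1}: train={len(train_idx)}, val={len(val_idx)}")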

If you have any questions or feedback, please let us know in the comments. See you next time!
