A Complete Guide to Running Machine Learning Models in Production with C# - A Practical Approach Using ML.NET and ONNX

June 26, 2025

Developing a machine learning model and operating it in production demand different skill sets. How do you take a model developed in Python and run it reliably in an enterprise environment? As an answer to this challenge, an approach built on C# with ML.NET and ONNX is attracting attention. This article walks through the advantages of operating machine learning models in C# and how to implement such a system.

Why Operate Machine Learning Models in C#?

1. Affinity with Enterprise Environments

Many corporate systems are built on .NET, so operating machine learning models in C# integrates seamlessly with existing infrastructure.

// Example of embedding machine learning into an existing .NET application
public class CustomerChurnPredictionService
{
    private readonly MLContext _mlContext;
    private readonly ITransformer _model;
    private readonly ILogger<CustomerChurnPredictionService> _logger;
    private readonly IDbContext _dbContext;

    public CustomerChurnPredictionService(
        ILogger<CustomerChurnPredictionService> logger,
        IDbContext dbContext,
        IConfiguration configuration)
    {
        _logger = logger;
        _dbContext = dbContext;
        _mlContext = new MLContext(seed: 1);

        // Retrieve settings from the existing DI container
        var modelPath = configuration["ML:ChurnModel:Path"];
        _model = _mlContext.Model.Load(modelPath, out _);
    }

    public async Task<ChurnPrediction> PredictCustomerChurnAsync(int customerId)
    {
        // Fetch data from the existing database
        var customer = await _dbContext.Customers
            .Include(c => c.Orders)
            .Include(c => c.SupportTickets)
            .FirstOrDefaultAsync(c => c.Id == customerId);

        var features = ExtractFeatures(customer);
        var prediction = _mlContext.Model
            .CreatePredictionEngine<CustomerFeatures, ChurnPrediction>(_model)
            .Predict(features);

        // Record an audit log entry
        await _dbContext.PredictionLogs.AddAsync(new PredictionLog
        {
            CustomerId = customerId,
            PredictionType = "Churn",
            Score = prediction.Score,
            Timestamp = DateTime.UtcNow
        });
        await _dbContext.SaveChangesAsync();

        return prediction;
    }
}
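
The PredictionEngine created inside PredictCustomerChurnAsync is not thread-safe and is expensive to construct per request. In an ASP.NET Core service you would typically use the PredictionEnginePool from the Microsoft.Extensions.ML package instead. A minimal registration sketch (the model name and file path are placeholders):

// Program.cs: register a pooled, thread-safe prediction engine.
// Requires the Microsoft.Extensions.ML NuGet package.
builder.Services.AddPredictionEnginePool<CustomerFeatures, ChurnPrediction>()
    .FromFile(modelName: "ChurnModel", filePath: "models/churn_model.zip", watchForChanges: true);

// Consumers then inject PredictionEnginePool<CustomerFeatures, ChurnPrediction>
// and call pool.Predict(modelName: "ChurnModel", example: features).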

2. Type Safety and Compile-Time Checks

C#'s strong type system lets you define a model's inputs and outputs explicitly and prevent runtime errors.

// Type-safe model input definition
public class SalesFeatures
{
    [LoadColumn(0)]
    public float PreviousSales { get; set; }

    [LoadColumn(1)]
    public float MarketingSpend { get; set; }

    [LoadColumn(2)]
    public float CompetitorPrice { get; set; }

    [LoadColumn(3)]
    public float Seasonality { get; set; }

    [LoadColumn(4)]
    public string ProductCategory { get; set; }

    // Validation logic
    public ValidationResult Validate()
    {
        var errors = new List<string>();

        if (PreviousSales < 0)
            errors.Add("Previous sales cannot be negative");

        if (MarketingSpend < 0)
            errors.Add("Marketing spend cannot be negative");

        if (string.IsNullOrWhiteSpace(ProductCategory))
            errors.Add("Product category is required");

        return new ValidationResult
        {
            IsValid = !errors.Any(),
            Errors = errors
        };
    }
}

// Type-safe prediction result
public class SalesPrediction
{
    [ColumnName("Score")]
    public float PredictedSales { get; set; }

    [ColumnName("Features")]
    public float[] FeatureContributions { get; set; }

    public string GetExplanation()
    {
        var topFeatures = FeatureContributions
            .Select((value, index) => new { Index = index, Value = value })
            .OrderByDescending(f => Math.Abs(f.Value))
            .Take(3);

        return $"予測売上: {PredictedSales:C}, " +
               $"主要因: {string.Join(", ", topFeatures.Select(f => $"Feature{f.Index}: {f.Value:F2}"))}";
    }
}
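
A minimal usage sketch tying the two types together, validating input before prediction (predictionEngine here stands in for a PredictionEngine<SalesFeatures, SalesPrediction> obtained elsewhere):

var features = new SalesFeatures
{
    PreviousSales = 12000f,
    MarketingSpend = 3000f,
    CompetitorPrice = 49.9f,
    Seasonality = 0.8f,
    ProductCategory = "Electronics"
};

// Reject invalid input before it ever reaches the model.
var validation = features.Validate();
if (!validation.IsValid)
    throw new ArgumentException(string.Join("; ", validation.Errors));

var prediction = predictionEngine.Predict(features);
Console.WriteLine(prediction.GetExplanation());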

3. Integrated Debugging and Monitoring

Integration with Visual Studio and Application Insights makes it easy to debug and monitor machine learning models.

public class MLModelTelemetry
{
    private static readonly ActivitySource _activitySource = new("MLModelTelemetry");
    private readonly TelemetryClient _telemetryClient;
    private readonly ILogger<MLModelTelemetry> _logger;
    private readonly MLContext _mlContext = new();

    public async Task<TPrediction> PredictWithTelemetryAsync<TFeatures, TPrediction>(
        ITransformer model,
        TFeatures features,
        string modelName)
        where TFeatures : class
        where TPrediction : class, new()
    {
        using var activity = _activitySource.StartActivity("ML.Predict");
        activity?.SetTag("model.name", modelName);

        var stopwatch = Stopwatch.StartNew();
        
        try
        {
            var predictionEngine = _mlContext.Model
                .CreatePredictionEngine<TFeatures, TPrediction>(model);
            
            var prediction = predictionEngine.Predict(features);
            
            // Record performance metrics
            _telemetryClient.TrackMetric($"{modelName}.PredictionLatency", 
                stopwatch.ElapsedMilliseconds);
            
            // Track the distribution of predictions
            if (prediction is IScorePrediction scorePrediction)
            {
                _telemetryClient.TrackMetric($"{modelName}.PredictionScore", 
                    scorePrediction.Score);
            }

            return prediction;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Prediction failed for model {ModelName}", modelName);
            _telemetryClient.TrackException(ex, new Dictionary<string, string>
            {
                ["ModelName"] = modelName,
                ["FeatureType"] = typeof(TFeatures).Name
            });
            throw;
        }
        finally
        {
            activity?.SetTag("prediction.duration_ms", stopwatch.ElapsedMilliseconds);
        }
    }
}

Practical Use of ML.NET

1. Model Training Pipeline

ML.NET provides a complete ML pipeline, from data preprocessing through model evaluation.

public class MLTrainingPipeline
{
    private readonly MLContext _mlContext;
    private readonly ILogger<MLTrainingPipeline> _logger;
    
    public async Task<TrainingResult> TrainModelAsync(string dataPath)
    {
        // Load the data
        var dataView = _mlContext.Data.LoadFromTextFile<ModelInput>(
            dataPath,
            hasHeader: true,
            separatorChar: ',');

        // Split the data into train and test sets
        var dataSplit = _mlContext.Data.TrainTestSplit(dataView, testFraction: 0.2);

        // Feature engineering pipeline
        var preprocessingPipeline = _mlContext.Transforms.Text
            .FeaturizeText("TextFeatures", nameof(ModelInput.Description))
            .Append(_mlContext.Transforms.Categorical.OneHotEncoding("CategoryEncoded", 
                nameof(ModelInput.Category)))
            .Append(_mlContext.Transforms.NormalizeMeanVariance("NormalizedPrice", 
                nameof(ModelInput.Price)))
            .Append(_mlContext.Transforms.Concatenate("Features",
                "TextFeatures", "CategoryEncoded", "NormalizedPrice",
                nameof(ModelInput.Quantity), nameof(ModelInput.DayOfWeek)));

        // Try multiple algorithms
        var trainers = new IEstimator<ITransformer>[]
        {
            _mlContext.Regression.Trainers.LightGbm(
                labelColumnName: nameof(ModelInput.Label),
                featureColumnName: "Features"),
            
            _mlContext.Regression.Trainers.FastTree(
                labelColumnName: nameof(ModelInput.Label),
                featureColumnName: "Features",
                numberOfTrees: 100,
                minimumExampleCountPerLeaf: 10),
            
            _mlContext.Regression.Trainers.FastForest(
                labelColumnName: nameof(ModelInput.Label),
                featureColumnName: "Features")
        };

        var bestModel = default(ITransformer);
        var bestMetrics = default(RegressionMetrics);
        var bestTrainerName = string.Empty;

        // Evaluate each algorithm
        foreach (var (trainer, name) in trainers.Select((t, i) => (t, $"Trainer{i}")))
        {
            var pipeline = preprocessingPipeline.Append(trainer);
            var model = pipeline.Fit(dataSplit.TrainSet);
            
            var predictions = model.Transform(dataSplit.TestSet);
            var metrics = _mlContext.Regression.Evaluate(predictions, 
                labelColumnName: nameof(ModelInput.Label));

            _logger.LogInformation(
                "Model {Name} - RMSE: {RMSE}, R2: {R2}, MAE: {MAE}",
                name, metrics.RootMeanSquaredError, metrics.RSquared, 
                metrics.MeanAbsoluteError);

            if (bestMetrics == null || metrics.RootMeanSquaredError < bestMetrics.RootMeanSquaredError)
            {
                bestModel = model;
                bestMetrics = metrics;
                bestTrainerName = name;
            }
        }

        // Save the best model
        var modelPath = Path.Combine("models", $"best_model_{DateTime.Now:yyyyMMddHHmmss}.zip");
        _mlContext.Model.Save(bestModel, dataView.Schema, modelPath);

        return new TrainingResult
        {
            ModelPath = modelPath,
            TrainerName = bestTrainerName,
            Metrics = bestMetrics,
            TrainingDate = DateTime.UtcNow
        };
    }
}
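
The pipeline above assumes a ModelInput schema along these lines (illustrative only; the columns are whatever your CSV actually contains):

// Assumed input schema for the training pipeline above (columns are examples).
public class ModelInput
{
    [LoadColumn(0)] public string Description { get; set; }
    [LoadColumn(1)] public string Category { get; set; }
    [LoadColumn(2)] public float Price { get; set; }
    [LoadColumn(3)] public float Quantity { get; set; }
    [LoadColumn(4)] public float DayOfWeek { get; set; }
    [LoadColumn(5)] public float Label { get; set; }
}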

2. Automation with AutoML

ML.NET AutoML can automatically select the best algorithm and hyperparameters.

public class AutoMLService
{
    private readonly ILogger<AutoMLService> _logger;

    public async Task<AutoMLResult> RunAutoMLExperimentAsync(
        string dataPath,
        uint experimentTimeInSeconds = 600)
    {
        var mlContext = new MLContext();
        
        // Load the data
        var dataView = mlContext.Data.LoadFromTextFile<ProductDemand>(
            dataPath, hasHeader: true, separatorChar: ',');

        // Configure the AutoML experiment
        var experimentSettings = new RegressionExperimentSettings
        {
            MaxExperimentTimeInSeconds = experimentTimeInSeconds,
            OptimizingMetric = RegressionMetric.RootMeanSquaredError,
            CacheDirectoryName = "MLCache"
        };

        // Progress handler
        var progressHandler = new Progress<RunDetail<RegressionMetrics>>(
            progress =>
            {
                if (progress.ValidationMetrics != null)
                {
                    _logger.LogInformation(
                        "Trainer: {Trainer}, RMSE: {RMSE:F4}, R2: {R2:F4}",
                        progress.TrainerName,
                        progress.ValidationMetrics.RootMeanSquaredError,
                        progress.ValidationMetrics.RSquared);
                }
            });

        // Run the AutoML experiment
        var experiment = mlContext.Auto()
            .CreateRegressionExperiment(experimentSettings);

        var result = await Task.Run(() => 
            experiment.Execute(
                dataView, 
                labelColumnName: nameof(ProductDemand.Demand),
                progressHandler: progressHandler));

        // Details of the best model
        _logger.LogInformation(
            "Best model: {Model}, RMSE: {RMSE:F4}, Training time: {Time:F2}s",
            result.BestRun.TrainerName,
            result.BestRun.ValidationMetrics.RootMeanSquaredError,
            result.BestRun.RuntimeInSeconds);

        // Cross-validation
        var crossValResults = mlContext.Regression.CrossValidate(
            dataView,
            result.BestRun.Estimator,
            numberOfFolds: 5,
            labelColumnName: nameof(ProductDemand.Demand));

        var avgRmse = crossValResults.Average(r => r.Metrics.RootMeanSquaredError);
        _logger.LogInformation("Cross-validation average RMSE: {RMSE:F4}", avgRmse);

        return new AutoMLResult
        {
            BestModel = result.BestRun.Model,
            TrainerName = result.BestRun.TrainerName,
            Metrics = result.BestRun.ValidationMetrics,
            CrossValidationScore = avgRmse
        };
    }
}

Model Interoperability with ONNX

1. Converting and Importing Python Models

Models trained in PyTorch or TensorFlow can be converted to ONNX format and used from C#.

# Python side: export the model to ONNX format
import torch
import torch.nn as nn
import torch.onnx

class CustomModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(CustomModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)
        self.fc2 = nn.Linear(hidden_size, output_size)
    
    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x

# Export the model
model = CustomModel(10, 64, 1)
model.load_state_dict(torch.load('model.pth'))
model.eval()

dummy_input = torch.randn(1, 10)
torch.onnx.export(model, 
                  dummy_input, 
                  "model.onnx",
                  input_names=['input'],
                  output_names=['output'],
                  dynamic_axes={'input': {0: 'batch_size'},
                               'output': {0: 'batch_size'}})

// C# side: load the ONNX model and run inference
public class ONNXModelService
{
    private readonly MLContext _mlContext;
    private readonly ITransformer _onnxModel;
    private readonly string _modelPath;

    public ONNXModelService(string modelPath)
    {
        _mlContext = new MLContext();
        _modelPath = modelPath;
        
        // Load the ONNX model
        var pipeline = _mlContext.Transforms
            .ApplyOnnxModel(
                modelFile: modelPath,
                outputColumnNames: new[] { "output" },
                inputColumnNames: new[] { "input" },
                gpuDeviceId: 0, // use the GPU if available
                fallbackToCpu: true);

        // Initialize the pipeline with dummy data
        var dummyData = _mlContext.Data.LoadFromEnumerable(
            new[] { new ModelInput { Features = new float[10] } });
        
        _onnxModel = pipeline.Fit(dummyData);
    }

    public async Task<BatchPredictionResult> PredictBatchAsync(
        IEnumerable<float[]> inputs)
    {
        var modelInputs = inputs.Select(i => new ModelInput { Features = i });
        var dataView = _mlContext.Data.LoadFromEnumerable(modelInputs);
        
        var predictions = _onnxModel.Transform(dataView);
        
        var results = _mlContext.Data
            .CreateEnumerable<ModelOutput>(predictions, reuseRowObject: false)
            .ToList();

        return new BatchPredictionResult
        {
            Predictions = results.Select(r => r.Output[0]).ToArray(),
            ProcessedCount = results.Count,
            ModelVersion = GetModelVersion()
        };
    }

    // Retrieve model metadata
    private string GetModelVersion()
    {
        using var session = new InferenceSession(_modelPath);
        var metadata = session.ModelMetadata;
        return metadata.Version > 0 ? metadata.Version.ToString() : "Unknown";
    }
}

public class ModelInput
{
    [VectorType(10)]
    [ColumnName("input")]
    public float[] Features { get; set; }
}

public class ModelOutput
{
    [ColumnName("output")]
    public float[] Output { get; set; }
}
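
A minimal usage sketch (the model path and input sizes are placeholders):

var service = new ONNXModelService("models/model.onnx");
var inputs = Enumerable.Range(0, 100).Select(_ => new float[10]);

var result = await service.PredictBatchAsync(inputs);
Console.WriteLine($"Processed {result.ProcessedCount} rows with model version {result.ModelVersion}");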

2. Working with More Complex ONNX Models

Implementation examples for more complex models, such as image recognition and natural language processing:

public class AdvancedONNXInference
{
    private readonly InferenceSession _session;
    private readonly string[] _labels;

    public AdvancedONNXInference(string modelPath, string labelsPath)
    {
        // Create the ONNX Runtime session
        var options = SessionOptions.MakeSessionOptionWithCudaProvider(0);
        _session = new InferenceSession(modelPath, options);
        _labels = File.ReadAllLines(labelsPath);
    }

    // Image classification example
    public async Task<ImageClassificationResult> ClassifyImageAsync(string imagePath)
    {
        var stopwatch = Stopwatch.StartNew();

        // Preprocess the image
        using var image = await Image.LoadAsync<Rgb24>(imagePath);
        var processedImage = PreprocessImage(image);
        
        // Create the input tensor
        var inputTensor = new DenseTensor<float>(processedImage, new[] { 1, 3, 224, 224 });
        
        // Run inference
        var inputs = new List<NamedOnnxValue>
        {
            NamedOnnxValue.CreateFromTensor("input", inputTensor)
        };
        
        using var results = _session.Run(inputs);
        var output = results.First().AsEnumerable<float>().ToArray();
        
        // Apply softmax
        var probabilities = Softmax(output);
        
        // Take the top-5 predictions
        var topPredictions = probabilities
            .Select((prob, idx) => new { Probability = prob, Index = idx })
            .OrderByDescending(x => x.Probability)
            .Take(5)
            .Select(x => new Prediction
            {
                Label = _labels[x.Index],
                Confidence = x.Probability
            })
            .ToList();

        return new ImageClassificationResult
        {
            TopPredictions = topPredictions,
            ProcessingTimeMs = stopwatch.ElapsedMilliseconds
        };
    }

    // Text sentiment analysis example
    public async Task<SentimentResult> AnalyzeSentimentAsync(string text)
    {
        // Encode the text with a BERT tokenizer
        var tokenizer = new BertTokenizer("vocab.txt");
        var tokens = tokenizer.Tokenize(text);
        var encoded = tokenizer.Encode(tokens);
        
        // Prepare the input tensors
        var inputIds = new DenseTensor<long>(
            encoded.InputIds, new[] { 1, encoded.InputIds.Length });
        var attentionMask = new DenseTensor<long>(
            encoded.AttentionMask, new[] { 1, encoded.AttentionMask.Length });
        
        var inputs = new List<NamedOnnxValue>
        {
            NamedOnnxValue.CreateFromTensor("input_ids", inputIds),
            NamedOnnxValue.CreateFromTensor("attention_mask", attentionMask)
        };
        
        using var results = _session.Run(inputs);
        var logits = results.First().AsTensor<float>();
        
        var probabilities = Softmax(logits.ToArray());
        
        return new SentimentResult
        {
            Positive = probabilities[1],
            Negative = probabilities[0],
            Sentiment = probabilities[1] > 0.5 ? "Positive" : "Negative",
            Confidence = Math.Max(probabilities[0], probabilities[1])
        };
    }

    private float[] Softmax(float[] values)
    {
        var maxVal = values.Max();
        var exp = values.Select(v => Math.Exp(v - maxVal)).ToArray();
        var sumExp = exp.Sum();
        return exp.Select(e => (float)(e / sumExp)).ToArray();
    }
}
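
PreprocessImage is referenced above but not shown. A sketch using ImageSharp, assuming the standard ImageNet normalization constants (adjust to match whatever preprocessing your model was trained with):

// Sketch: resize to 224x224 and produce a normalized CHW float array.
// Assumes SixLabors.ImageSharp and SixLabors.ImageSharp.Processing.
private float[] PreprocessImage(Image<Rgb24> image)
{
    image.Mutate(ctx => ctx.Resize(224, 224));

    var mean = new[] { 0.485f, 0.456f, 0.406f };
    var std = new[] { 0.229f, 0.224f, 0.225f };
    var data = new float[3 * 224 * 224];

    for (var y = 0; y < 224; y++)
    {
        for (var x = 0; x < 224; x++)
        {
            var pixel = image[x, y];
            data[0 * 224 * 224 + y * 224 + x] = (pixel.R / 255f - mean[0]) / std[0];
            data[1 * 224 * 224 + y * 224 + x] = (pixel.G / 255f - mean[1]) / std[1];
            data[2 * 224 * 224 + y * 224 + x] = (pixel.B / 255f - mean[2]) / std[2];
        }
    }

    return data;
}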

Best Practices for Production Environments

1. Model Version Management

public class ModelVersionManager
{
    private readonly IModelRepository _repository;
    private readonly IMemoryCache _cache;
    private readonly ILogger<ModelVersionManager> _logger;

    public async Task<ITransformer> LoadModelAsync(
        string modelName, 
        string version = "latest")
    {
        var cacheKey = $"{modelName}:{version}";
        
        if (_cache.TryGetValue<ITransformer>(cacheKey, out var cachedModel))
        {
            _logger.LogDebug("Model {Name}:{Version} loaded from cache", 
                modelName, version);
            return cachedModel;
        }

        var modelInfo = await _repository.GetModelInfoAsync(modelName, version);
        
        if (modelInfo == null)
        {
            throw new ModelNotFoundException($"Model {modelName}:{version} not found");
        }

        // Validate the model
        if (!await ValidateModelAsync(modelInfo))
        {
            throw new InvalidModelException($"Model {modelName}:{version} validation failed");
        }

        var model = await LoadModelFromStorageAsync(modelInfo.Path);
        
        // Add to the cache (with a sliding expiration)
        _cache.Set(cacheKey, model, new MemoryCacheEntryOptions
        {
            SlidingExpiration = TimeSpan.FromHours(1),
            Size = modelInfo.SizeInMB
        });

        // Record model usage
        await _repository.LogModelUsageAsync(new ModelUsageLog
        {
            ModelName = modelName,
            Version = version,
            LoadedAt = DateTime.UtcNow,
            HostName = Environment.MachineName
        });

        return model;
    }

    private async Task<bool> ValidateModelAsync(ModelInfo modelInfo)
    {
        // Verify the checksum
        var actualChecksum = await ComputeChecksumAsync(modelInfo.Path);
        if (actualChecksum != modelInfo.Checksum)
        {
            _logger.LogError("Checksum mismatch for model {Name}", modelInfo.Name);
            return false;
        }

        // Basic smoke test of the model
        try
        {
            var testModel = await LoadModelFromStorageAsync(modelInfo.Path);
            var testInput = CreateTestInput(modelInfo.InputSchema);
            var testOutput = TestPredict(testModel, testInput);
            
            return ValidateOutput(testOutput, modelInfo.OutputSchema);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Model validation failed for {Name}", modelInfo.Name);
            return false;
        }
    }
}

2. A/B Testing and Canary Releases

public class ModelABTestingService
{
    private readonly Dictionary<string, ModelVariant> _variants;
    private readonly IMetricsCollector _metrics;
    private readonly ILogger<ModelABTestingService> _logger;
    
    public async Task<TPrediction> PredictWithABTestAsync<TFeatures, TPrediction>(
        TFeatures features,
        string userId,
        string experimentName)
        where TFeatures : class
        where TPrediction : class, new()
    {
        // Assign the user to an experiment group
        var variant = AssignUserToVariant(userId, experimentName);
        
        var stopwatch = Stopwatch.StartNew();
        TPrediction prediction;
        
        try
        {
            // Predict with the selected model
            prediction = await variant.Model.PredictAsync<TFeatures, TPrediction>(features);
            
            // Record metrics
            await _metrics.RecordPredictionAsync(new PredictionMetric
            {
                ExperimentName = experimentName,
                VariantName = variant.Name,
                UserId = userId,
                LatencyMs = stopwatch.ElapsedMilliseconds,
                Success = true,
                Timestamp = DateTime.UtcNow
            });
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Prediction failed for variant {Variant}", variant.Name);
            
            // Fall back to the control model
            if (variant.HasFallback)
            {
                prediction = await _variants["control"].Model
                    .PredictAsync<TFeatures, TPrediction>(features);
            }
            else
            {
                throw;
            }
        }

        return prediction;
    }

    private ModelVariant AssignUserToVariant(string userId, string experimentName)
    {
        // Consistent hash-based assignment
        var hash = ComputeHash($"{userId}:{experimentName}");
        var bucket = hash % 100;
        
        var cumulativeTraffic = 0;
        foreach (var variant in _variants.Values)
        {
            cumulativeTraffic += variant.TrafficPercentage;
            if (bucket < cumulativeTraffic)
            {
                return variant;
            }
        }
        
        return _variants["control"];
    }
}
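
ComputeHash is referenced above but not shown. Note that string.GetHashCode is randomized per process in .NET, so consistent assignment requires a stable hash; a sketch:

// Sketch: a process-stable hash for bucketing users into variants.
private static int ComputeHash(string input)
{
    using var sha = System.Security.Cryptography.SHA256.Create();
    var bytes = sha.ComputeHash(System.Text.Encoding.UTF8.GetBytes(input));
    // Take the first four bytes as a non-negative integer.
    return BitConverter.ToInt32(bytes, 0) & int.MaxValue;
}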

3. Real-Time Monitoring and Alerts

public class MLModelMonitor
{
    private readonly IMetricsCollector _metrics;
    private readonly IAlertService _alertService;
    private readonly ConcurrentDictionary<string, ModelHealthStatus> _healthStatus;

    public async Task MonitorPredictionAsync<TPrediction>(
        string modelName,
        TPrediction prediction,
        TimeSpan latency) where TPrediction : IScorePrediction
    {
        // Track latency
        await _metrics.RecordMetricAsync(
            $"{modelName}.latency",
            latency.TotalMilliseconds,
            new Dictionary<string, string> { ["model"] = modelName });

        // Track the distribution of prediction scores
        await _metrics.RecordMetricAsync(
            $"{modelName}.prediction_score",
            prediction.Score,
            new Dictionary<string, string> { ["model"] = modelName });

        // Anomaly detection
        var health = _healthStatus.GetOrAdd(modelName, new ModelHealthStatus());
        health.RecordPrediction(prediction.Score, latency);

        if (health.IsAnomalous())
        {
            await _alertService.SendAlertAsync(new Alert
            {
                Severity = AlertSeverity.Warning,
                Title = $"Model {modelName} showing anomalous behavior",
                Description = $"Average score: {health.AverageScore:F2}, " +
                            $"Score variance: {health.ScoreVariance:F2}, " +
                            $"P95 latency: {health.P95Latency}ms",
                ModelName = modelName,
                Timestamp = DateTime.UtcNow
            });
        }

        // Detect data drift
        if (await DetectDataDriftAsync(modelName, prediction))
        {
            await _alertService.SendAlertAsync(new Alert
            {
                Severity = AlertSeverity.High,
                Title = $"Data drift detected for model {modelName}",
                Description = "Input distribution has significantly changed",
                ModelName = modelName,
                Timestamp = DateTime.UtcNow
            });
        }
    }

    private async Task<bool> DetectDataDriftAsync<TPrediction>(
        string modelName, 
        TPrediction prediction)
    {
        // Simplified drift detection logic
        var recentPredictions = await _metrics.GetRecentPredictionsAsync(modelName, 
            TimeSpan.FromHours(1));
        
        if (recentPredictions.Count < 100)
            return false;

        var baseline = await _metrics.GetBaselineDistributionAsync(modelName);
        var current = CalculateDistribution(recentPredictions);
        
        // Detect drift with KL divergence, a KS test, or similar
        var klDivergence = CalculateKLDivergence(baseline, current);
        
        return klDivergence > 0.1; // the threshold needs tuning
    }
}
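
CalculateKLDivergence is left undefined above; a sketch over discrete, normalized histograms (both arrays are assumed to share the same bin layout):

// Sketch: KL divergence D(P || Q) between two discrete distributions.
// A small epsilon guards against empty bins.
private static double CalculateKLDivergence(double[] baseline, double[] current)
{
    const double epsilon = 1e-10;
    double kl = 0;
    for (var i = 0; i < baseline.Length; i++)
    {
        var p = baseline[i] + epsilon;
        var q = current[i] + epsilon;
        kl += p * Math.Log(p / q);
    }
    return kl;
}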

public class ModelHealthStatus
{
    private readonly Queue<(float score, TimeSpan latency)> _recentPredictions;
    private readonly object _lock = new();

    public ModelHealthStatus()
    {
        _recentPredictions = new Queue<(float, TimeSpan)>();
    }

    public void RecordPrediction(float score, TimeSpan latency)
    {
        lock (_lock)
        {
            _recentPredictions.Enqueue((score, latency));
            
            // Keep only the most recent 1,000 entries
            while (_recentPredictions.Count > 1000)
            {
                _recentPredictions.Dequeue();
            }
        }
    }

    public bool IsAnomalous()
    {
        lock (_lock)
        {
            if (_recentPredictions.Count < 100)
                return false;

            var scores = _recentPredictions.Select(p => p.score).ToArray();
            var latencies = _recentPredictions.Select(p => p.latency.TotalMilliseconds).ToArray();

            // Statistical anomaly detection
            var scoreStdDev = CalculateStandardDeviation(scores);
            var latencyP95 = CalculatePercentile(latencies, 95);

            return scoreStdDev > 0.3 || latencyP95 > 1000;
        }
    }

    // Snapshot properties take the lock because the queue is mutated concurrently.
    public float AverageScore { get { lock (_lock) return _recentPredictions.Average(p => p.score); } }
    public float ScoreVariance { get { lock (_lock) return CalculateVariance(_recentPredictions.Select(p => p.score).ToArray()); } }
    public double P95Latency { get { lock (_lock) return CalculatePercentile(
        _recentPredictions.Select(p => p.latency.TotalMilliseconds).ToArray(), 95); } }
}
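
The statistical helpers used by ModelHealthStatus are not shown; minimal sketches:

// Sketch: population variance, standard deviation, and nearest-rank percentile.
private static float CalculateVariance(float[] values)
{
    var mean = values.Average();
    return values.Sum(v => (v - mean) * (v - mean)) / values.Length;
}

private static float CalculateStandardDeviation(float[] values) =>
    (float)Math.Sqrt(CalculateVariance(values));

private static double CalculatePercentile(double[] values, int percentile)
{
    var sorted = values.OrderBy(v => v).ToArray();
    var rank = (int)Math.Ceiling(percentile / 100.0 * sorted.Length) - 1;
    return sorted[Math.Clamp(rank, 0, sorted.Length - 1)];
}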

4. Efficient Batch Processing

public class BatchPredictionService
{
    private readonly Channel<PredictionRequest> _requestChannel;
    private readonly ITransformer _model;
    private readonly MLContext _mlContext;

    public BatchPredictionService(MLContext mlContext, ITransformer model)
    {
        _mlContext = mlContext;
        _model = model;
        _requestChannel = Channel.CreateUnbounded<PredictionRequest>();
        _ = ProcessBatchesAsync(); // start processing in the background
    }

    public async Task<TPrediction> PredictAsync<TFeatures, TPrediction>(TFeatures features)
        where TFeatures : class
        where TPrediction : class, new()
    {
        var tcs = new TaskCompletionSource<TPrediction>();
        var request = new PredictionRequest
        {
            Features = features,
            CompletionSource = tcs,
            FeatureType = typeof(TFeatures),
            PredictionType = typeof(TPrediction)
        };

        await _requestChannel.Writer.WriteAsync(request);
        return await tcs.Task;
    }

    private async Task ProcessBatchesAsync()
    {
        var batch = new List<PredictionRequest>();
        var batchLock = new object();
        // The timer fires on a thread-pool thread, so access to the batch list is locked.
        using var batchTimer = new Timer(_ => ProcessCurrentBatch(), null,
            TimeSpan.FromMilliseconds(50), TimeSpan.FromMilliseconds(50));

        await foreach (var request in _requestChannel.Reader.ReadAllAsync())
        {
            bool flush;
            lock (batchLock)
            {
                batch.Add(request);
                // Process immediately once the batch size is reached
                flush = batch.Count >= 32;
            }

            if (flush)
            {
                ProcessCurrentBatch();
            }
        }

        void ProcessCurrentBatch()
        {
            List<PredictionRequest> currentBatch;
            lock (batchLock)
            {
                if (batch.Count == 0) return;
                currentBatch = batch.ToList();
                batch.Clear();
            }

            Task.Run(() => ProcessBatch(currentBatch));
        }
    }

    private void ProcessBatch(List<PredictionRequest> requests)
    {
        try
        {
            // Group requests by type
            var groupedRequests = requests.GroupBy(r => (r.FeatureType, r.PredictionType));

            foreach (var group in groupedRequests)
            {
                var featureType = group.Key.FeatureType;
                var predictionType = group.Key.PredictionType;

                // Dispatch generically via reflection
                var method = GetType()
                    .GetMethod(nameof(ProcessTypedBatch), BindingFlags.NonPublic | BindingFlags.Instance)
                    .MakeGenericMethod(featureType, predictionType);

                method.Invoke(this, new object[] { group.ToList() });
            }
        }
        catch (Exception ex)
        {
            // On error, fault each pending request
            foreach (var request in requests)
            {
                request.CompletionSource.SetException(ex);
            }
        }
    }

    private void ProcessTypedBatch<TFeatures, TPrediction>(List<PredictionRequest> requests)
        where TFeatures : class
        where TPrediction : class, new()
    {
        // Prepare the batch data
        var features = requests.Select(r => (TFeatures)r.Features);
        var dataView = _mlContext.Data.LoadFromEnumerable(features);

        // Run the batch prediction
        var predictions = _model.Transform(dataView);
        var results = _mlContext.Data
            .CreateEnumerable<TPrediction>(predictions, reuseRowObject: false)
            .ToList();

        // Deliver the results to each request
        for (int i = 0; i < requests.Count; i++)
        {
            ((TaskCompletionSource<TPrediction>)requests[i].CompletionSource)
                .SetResult(results[i]);
        }
    }
}

Performance Optimization

1. Leveraging GPUs

public class GPUAcceleratedInference
{
    private readonly MLContext _mlContext;
    
    public GPUAcceleratedInference()
    {
        // Configure GPU usage
        _mlContext = new MLContext(seed: 1);
        _mlContext.GpuDeviceId = 0;
        _mlContext.FallbackToCpu = true;
    }

    public async Task<float[]> RunGPUInferenceAsync(float[][] batch)
    {
        // GPU inference with ONNX Runtime
        var options = SessionOptions.MakeSessionOptionWithCudaProvider(0);
        options.GraphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_ALL;
        
        using var session = new InferenceSession("model.onnx", options);
        
        // Create a tensor for the batch
        var batchSize = batch.Length;
        var inputSize = batch[0].Length;
        var flatArray = batch.SelectMany(x => x).ToArray();
        
        var inputTensor = new DenseTensor<float>(flatArray, 
            new[] { batchSize, inputSize });
        
        var inputs = new List<NamedOnnxValue>
        {
            NamedOnnxValue.CreateFromTensor("input", inputTensor)
        };
        
        using var results = session.Run(inputs);
        return results.First().AsTensor<float>().ToArray();
    }
}

2. Memory Optimization

public class MemoryEfficientPrediction
{
    private readonly ArrayPool<float> _arrayPool;
    private readonly MemoryPool<float> _memoryPool;
    
    public MemoryEfficientPrediction()
    {
        _arrayPool = ArrayPool<float>.Shared;
        _memoryPool = MemoryPool<float>.Shared;
    }

    public async Task ProcessLargeDatasetAsync(string dataPath)
    {
        const int batchSize = 1000;
        
        await foreach (var batch in ReadDataInBatchesAsync(dataPath, batchSize))
        {
            // Rent memory from the pool
            var pooledArray = _arrayPool.Rent(batch.Length * 10); // feature count x batch size
            
            try
            {
                // Process the data
                ProcessBatch(batch, pooledArray);
                
                // Run predictions
                var predictions = await PredictBatchAsync(pooledArray, batch.Length);
                
                // Handle the results
                await SavePredictionsAsync(predictions);
            }
            finally
            {
                // Return the memory to the pool
                _arrayPool.Return(pooledArray, clearArray: true);
            }
        }
    }

    private async IAsyncEnumerable<float[]> ReadDataInBatchesAsync(
        string path, 
        int batchSize,
        [EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        using var fileStream = new FileStream(path, FileMode.Open, FileAccess.Read, 
            FileShare.Read, 4096, useAsync: true);
        using var reader = new StreamReader(fileStream);
        
        var batch = new List<float[]>(batchSize);
        
        string line;
        while ((line = await reader.ReadLineAsync()) != null)
        {
            cancellationToken.ThrowIfCancellationRequested();
            
            var values = line.Split(',').Select(float.Parse).ToArray();
            batch.Add(values);
            
            if (batch.Count >= batchSize)
            {
                yield return batch.SelectMany(x => x).ToArray();
                batch.Clear();
            }
        }
        
        if (batch.Count > 0)
        {
            yield return batch.SelectMany(x => x).ToArray();
        }
    }
}

Security and Compliance

1. Encrypting Models and Data

public class SecureModelStorage
{
    private readonly IDataProtector _protector;
    private readonly X509Certificate2 _certificate;
    private readonly MLContext _mlContext = new();

    public async Task<string> SaveEncryptedModelAsync(ITransformer model, string modelName)
    {
        using var memoryStream = new MemoryStream();
        
        // Serialize the model
        _mlContext.Model.Save(model, null, memoryStream);
        var modelBytes = memoryStream.ToArray();
        
        // AES encryption
        using var aes = Aes.Create();
        aes.GenerateKey();
        aes.GenerateIV();
        
        using var encryptor = aes.CreateEncryptor();
        var encryptedModel = encryptor.TransformFinalBlock(modelBytes, 0, modelBytes.Length);
        
        // Encrypt the AES key with RSA
        using var rsa = _certificate.GetRSAPublicKey();
        var encryptedKey = rsa.Encrypt(aes.Key, RSAEncryptionPadding.OaepSHA256);
        
        // Persist the encrypted model
        var secureModel = new SecureModel
        {
            ModelName = modelName,
            EncryptedData = Convert.ToBase64String(encryptedModel),
            EncryptedKey = Convert.ToBase64String(encryptedKey),
            IV = Convert.ToBase64String(aes.IV),
            Checksum = ComputeChecksum(modelBytes),
            CreatedAt = DateTime.UtcNow
        };
        
        var filePath = Path.Combine("secure_models", $"{modelName}.smodel");
        await File.WriteAllTextAsync(filePath, JsonSerializer.Serialize(secureModel));
        
        return filePath;
    }

    public async Task<ITransformer> LoadEncryptedModelAsync(string filePath)
    {
        var json = await File.ReadAllTextAsync(filePath);
        var secureModel = JsonSerializer.Deserialize<SecureModel>(json);
        
        // Decrypt the key with RSA
        using var rsa = _certificate.GetRSAPrivateKey();
        var aesKey = rsa.Decrypt(
            Convert.FromBase64String(secureModel.EncryptedKey), 
            RSAEncryptionPadding.OaepSHA256);
        
        // Decrypt the model with AES
        using var aes = Aes.Create();
        aes.Key = aesKey;
        aes.IV = Convert.FromBase64String(secureModel.IV);
        
        using var decryptor = aes.CreateDecryptor();
        var encryptedData = Convert.FromBase64String(secureModel.EncryptedData);
        var modelBytes = decryptor.TransformFinalBlock(encryptedData, 0, encryptedData.Length);
        
        // Verify the checksum
        if (ComputeChecksum(modelBytes) != secureModel.Checksum)
        {
            throw new SecurityException("Model integrity check failed");
        }
        
        // Load the model
        using var memoryStream = new MemoryStream(modelBytes);
        return _mlContext.Model.Load(memoryStream, out _);
    }
}
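
ComputeChecksum is referenced above but not shown; a SHA-256 sketch:

// Sketch: SHA-256 checksum used for the integrity check above.
private static string ComputeChecksum(byte[] data)
{
    using var sha = System.Security.Cryptography.SHA256.Create();
    return Convert.ToHexString(sha.ComputeHash(data));
}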

2. Audit Logging and Compliance

public class MLAuditLogger
{
    private readonly ILogger<MLAuditLogger> _logger;
    private readonly IAuditStore _auditStore;
    private readonly IAlertService _alertService;

    public async Task LogPredictionAsync<TFeatures, TPrediction>(
        string modelName,
        TFeatures features,
        TPrediction prediction,
        string userId,
        Dictionary<string, string> metadata = null)
    {
        var auditEntry = new PredictionAuditEntry
        {
            Id = Guid.NewGuid(),
            Timestamp = DateTime.UtcNow,
            ModelName = modelName,
            ModelVersion = await GetModelVersionAsync(modelName),
            UserId = userId,
            InputHash = ComputeFeatureHash(features),
            OutputHash = ComputePredictionHash(prediction),
            Metadata = metadata ?? new Dictionary<string, string>(),
            ComplianceFlags = DetermineComplianceFlags(features)
        };

        // GDPR compliance
        if (auditEntry.ComplianceFlags.Contains("PII"))
        {
            auditEntry.InputData = null; // do not store PII
            auditEntry.AnonymizedInput = AnonymizeFeatures(features);
        }
        else
        {
            auditEntry.InputData = JsonSerializer.Serialize(features);
        }

        await _auditStore.SaveAuditEntryAsync(auditEntry);
        
        // Detect anomalous access patterns
        if (await DetectAnomalousAccessAsync(userId, modelName))
        {
            await _alertService.RaiseSecurityAlertAsync(
                $"Anomalous access pattern detected for user {userId} on model {modelName}");
        }
    }

    private List<string> DetermineComplianceFlags<TFeatures>(TFeatures features)
    {
        var flags = new List<string>();
        
        // Detect PII
        var properties = typeof(TFeatures).GetProperties();
        foreach (var prop in properties)
        {
            if (prop.GetCustomAttribute<PersonalDataAttribute>() != null)
            {
                flags.Add("PII");
            }
            
            if (prop.GetCustomAttribute<SensitiveDataAttribute>() != null)
            {
                flags.Add("SENSITIVE");
            }
        }
        
        return flags;
    }
}
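
PersonalDataAttribute and SensitiveDataAttribute above are assumed to be marker attributes on feature properties (ASP.NET Core Identity ships a PersonalDataAttribute; a minimal custom pair would look like this):

// Sketch: marker attributes consumed by DetermineComplianceFlags above.
[AttributeUsage(AttributeTargets.Property)]
public sealed class PersonalDataAttribute : Attribute { }

[AttributeUsage(AttributeTargets.Property)]
public sealed class SensitiveDataAttribute : Attribute { }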

Summary

Operating machine learning models in production with C# offers these major advantages:

  1. Enterprise integration: seamless integration with existing .NET infrastructure
  2. Type safety: high reliability through compile-time error detection
  3. Performance: efficient memory management and GPU utilization
  4. Operability: rich monitoring and debugging tools
  5. Security: enterprise-grade security features
  6. Interoperability: cross-language model sharing via ONNX

By leveraging ML.NET and ONNX, you can efficiently operate models developed in Python from a C# environment and build robust ML systems that satisfy enterprise requirements.

At エンハンスド株式会社, we provide design, implementation, and operational support for machine learning systems built on ML.NET and ONNX, offering solutions tailored to each customer's needs, from deploying models to production through integrating them into large-scale systems.
