HEX
Server: nginx/1.29.3
System: Linux 11979.bigscoots-wpo.com 6.8.0-88-generic #89-Ubuntu SMP PREEMPT_DYNAMIC Sat Oct 11 01:02:46 UTC 2025 x86_64
User: nginx (1068)
PHP: 7.4.33
Disabled: exec,system,passthru,shell_exec,proc_open,proc_close,popen,show_source,cmd# Do not modify this line # 1684243876
Upload Files
File: //usr/include/llvm/Analysis/NoInferenceModelRunner.h
//===- NoInferenceModelRunner.h ---- noop ML model runner  ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H

#include "llvm/Analysis/MLModelRunner.h"
namespace llvm {
class TensorSpec;

/// A pseudo model runner. We use it to store feature values when collecting
/// logs for the default policy, in 'development' mode, but never ask it to
/// 'run'.
class NoInferenceModelRunner : public MLModelRunner {
public:
  /// Construct a no-op runner that only records the given input feature
  /// specs; callers may populate feature buffers but must never evaluate.
  NoInferenceModelRunner(LLVMContext &Ctx,
                         const std::vector<TensorSpec> &Inputs);

  /// LLVM-style RTTI support: this runner is identified by the NoOp kind.
  static bool classof(const MLModelRunner *R) {
    const auto K = R->getKind();
    return K == MLModelRunner::Kind::NoOp;
  }

private:
  /// Evaluation is intentionally unsupported for this runner; reaching
  /// this override is a programming error.
  void *evaluateUntyped() override {
    llvm_unreachable("We shouldn't call run on this model runner.");
  }
};
} // namespace llvm
#endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H