Struct Program

Struct Documentation

struct cinn::frontend::Program

Program is a representation of a computation.
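
A minimal usage sketch (not part of the reference itself): the member functions below act as a builder, each appending an Instruction and returning the resulting Variable. The sketch assumes x and y are frontend Variables obtained elsewhere, e.g. from placeholders.

  cinn::frontend::Program program;
  auto sum = program.add(x, y);   // appends an "add" instruction
  auto act = program.relu(sum);   // appends a "relu" instruction
  program.Validate();             // sanity-check the assembled program
  for (size_t i = 0; i < program.size(); ++i) {
    const auto &instr = program[i];  // inspect the i-th Instruction
  }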

Public Types

using attr_t = hlir::framework::NodeAttr::attr_t

Public Functions

Program() = default
Program(std::vector<Instruction> &&instrs, std::vector<Variable> &&inputs)
void SetInputs(const std::vector<Variable> &xs)
const std::vector<Variable> &GetInputs() const
template<typename PrimType>
Variable primitive_const_scalar(PrimType value, const std::string &name)

Create a scalar with the specified value and type.
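
For example (a sketch):

  // scalar float constant with value 1.0, named "one"
  auto one = program.primitive_const_scalar<float>(1.0f, "one");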

template<typename PrimType>
Variable fill_constant(const std::vector<int> &shape, float float_value, const std::string &str_value, bool force_cpu, const std::string &name)

Create a tensor with the specified shape, type, and value.
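
For example (a sketch; how float_value and str_value interact is not specified here, so passing a matching string is an assumption):

  // 2x3 float tensor filled with zeros
  auto zeros = program.fill_constant<float>(
      {2, 3}, /*float_value=*/0.0f, /*str_value=*/"0",
      /*force_cpu=*/false, /*name=*/"zeros_2x3");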

Variable add(const Variable &a, const Variable &b)

Add two variables.

Return

The result.

Parameters
  • a: The first variable.

  • b: The second variable.

Variable multiply(const Variable &a, const Variable &b)
Variable mul(const Variable &a, const Variable &b, int x_num_col_dims = 1, int y_num_col_dims = 1)

Multiply two matrices.

Variable matmul(const Variable &a, const Variable &b, bool trans_a = false, bool trans_b = false, float alpha = 1)

Multiply two matrices, optionally transposing either operand and scaling the result by alpha.
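
For example (a sketch, assuming a has shape [M, K] and b has shape [N, K]):

  // b is transposed so the contraction dimensions line up
  auto c = program.matmul(a, b, /*trans_a=*/false, /*trans_b=*/true, /*alpha=*/1.0f);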

Variable reshape(const Variable &a, const std::vector<int> &shape)

Reshape a tensor.

Return

The reshaped output tensor.

Parameters
  • a: The input tensor.

  • shape: The target shape of the output tensor.
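
For example (a sketch; the element count must stay the same):

  // reinterpret a [2, 3, 4] tensor as [6, 4]
  auto flat = program.reshape(a, {6, 4});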

Variable concat(const std::vector<Variable> &input_vars, int axis = 0)

Concat tensors.

Return

The concatenated output tensor.

Parameters
  • input_vars: The input tensors.

  • axis: The axis along which to concatenate.
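
For example (a sketch, assuming a and b match on every axis except axis 1):

  auto joined = program.concat({a, b}, /*axis=*/1);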

Variable transpose(const Variable &input_vars, const std::vector<int> &axis)
Variable mulbias(const Variable &a, const Variable &b, const Variable &c, int x_num_col_dims = 1, int y_num_col_dims = 1)

Multiply two matrices and add a bias.

Variable primitive_exp(const Variable &a)
Variable primitive_erf(const Variable &a)
Variable primitive_sqrt(const Variable &a)
Variable primitive_log(const Variable &a)
Variable primitive_floor(const Variable &a)
Variable primitive_ceil(const Variable &a)
Variable primitive_round(const Variable &a)
Variable primitive_tanh(const Variable &a)
Variable primitive_log2(const Variable &a)
Variable primitive_log10(const Variable &a)
Variable primitive_trunc(const Variable &a)
Variable primitive_cos(const Variable &a)
Variable primitive_sin(const Variable &a)
Variable primitive_cosh(const Variable &a)
Variable primitive_tan(const Variable &a)
Variable primitive_sinh(const Variable &a)
Variable primitive_acos(const Variable &a)
Variable primitive_acosh(const Variable &a)
Variable primitive_asin(const Variable &a)
Variable primitive_asinh(const Variable &a)
Variable primitive_atan(const Variable &a)
Variable primitive_atanh(const Variable &a)
Variable primitive_isnan(const Variable &a)
Variable primitive_isfinite(const Variable &a)
Variable primitive_isinf(const Variable &a)
Variable primitive_bitwise_not(const Variable &a)
Variable primitive_negative(const Variable &a)
Variable primitive_identity(const Variable &a)
Variable primitive_logical_not(const Variable &a)
Variable primitive_sign(const Variable &a)
Variable primitive_abs(const Variable &a)
Variable primitive_rsqrt(const Variable &a)
Variable primitive_substract(const Variable &a, const Variable &b)
Variable primitive_divide(const Variable &a, const Variable &b)
Variable primitive_floor_divide(const Variable &a, const Variable &b)
Variable primitive_mod(const Variable &a, const Variable &b)
Variable primitive_floor_mod(const Variable &a, const Variable &b)
Variable primitive_max(const Variable &a, const Variable &b)
Variable primitive_min(const Variable &a, const Variable &b)
Variable primitive_power(const Variable &a, const Variable &b)
Variable primitive_logical_and(const Variable &a, const Variable &b)
Variable primitive_logical_or(const Variable &a, const Variable &b)
Variable primitive_logical_xor(const Variable &a, const Variable &b)
Variable primitive_greater(const Variable &a, const Variable &b)
Variable primitive_less(const Variable &a, const Variable &b)
Variable primitive_equal(const Variable &a, const Variable &b)
Variable primitive_not_equal(const Variable &a, const Variable &b)
Variable primitive_greater_equal(const Variable &a, const Variable &b)
Variable primitive_less_equal(const Variable &a, const Variable &b)
Variable primitive_bitwise_or(const Variable &a, const Variable &b)
Variable primitive_bitwise_xor(const Variable &a, const Variable &b)
Variable primitive_bitwise_and(const Variable &a, const Variable &b)
Variable primitive_left_shift(const Variable &a, const Variable &b)
Variable primitive_right_shift(const Variable &a, const Variable &b)
Variable reduce_sum(const Variable &a, const std::vector<int> &dim, bool keep_dim = false)
Variable reduce_prod(const Variable &a, const std::vector<int> &dim, bool keep_dim = false)
Variable reduce_min(const Variable &a, const std::vector<int> &dim, bool keep_dim = false)
Variable reduce_max(const Variable &a, const std::vector<int> &dim, bool keep_dim = false)
Variable primitive_broadcast_to(const Variable &a, const std::vector<int> &out_shape, const std::vector<int> &broadcast_axes)

Broadcast one operand to the target shape. broadcast_axes gives, for each axis of a, the output axis it maps to. Note: each dimension of a should be 1 or equal to the output dimension it maps to. E.g. if a[64] broadcasts to out[1, 64, 112, 112], then out_shape is {1, 64, 112, 112} and broadcast_axes is {1}.
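
The example from the note, written as a call (a sketch):

  // a has shape [64]; its single axis maps to axis 1 of the output
  auto out = program.primitive_broadcast_to(a, {1, 64, 112, 112}, {1});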

Variable elementwise_add(const Variable &a, const Variable &b, int axis = -1)

Add two tensors element-wise.

Variable elementwise_mul(const Variable &a, const Variable &b, int axis = -1)

Multiply two tensors element-wise.

Variable elementwise_div(const Variable &a, const Variable &b, int axis = -1)

Divide two tensors element-wise.

Variable elementwise_sub(const Variable &a, const Variable &b, int axis = -1)

Subtract two tensors element-wise.
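
A sketch of the axis argument, assuming it follows the usual elementwise broadcast convention where axis names the dimension of the larger operand that the smaller operand's first dimension aligns with:

  // x: [N, C, H, W], bias: [C]; axis = 1 aligns bias with the channel dim
  auto y = program.elementwise_add(x, bias, /*axis=*/1);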

Variable assign(const Variable &a)
Variable relu(const Variable &a)

Apply the Rectified Linear Unit to the input Variable. Computes: output = max(input, 0).

Return

The result.

Parameters
  • a: The input variable.

Variable relu6(const Variable &a)
Variable conv2d(const Variable &a, const Variable &b, const absl::flat_hash_map<std::string, attr_t> &attr_store)

The convolution2D layer calculates the output based on the input and the filter, using the strides, paddings, dilations, and groups parameters.

Return

The result.

Parameters
  • a: The first variable input.

  • b: The second variable, the filter (weights).

  • attr_store: The params like padding, stride, dilation, etc.
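
A sketch of assembling attr_store for conv2d; the key names ("stride", "padding", "dilation", "groups") and value types are assumptions, not taken from this reference:

  absl::flat_hash_map<std::string, cinn::frontend::Program::attr_t> attrs;
  attrs["stride"]   = std::vector<int>{1, 1};    // assumed key and value type
  attrs["padding"]  = std::vector<int>{1, 1};    // assumed key and value type
  attrs["dilation"] = std::vector<int>{1, 1};    // assumed key and value type
  attrs["groups"]   = 1;                         // assumed key and value type
  auto out = program.conv2d(input, filter, attrs);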

Variable layout_transform(const Variable &a, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable conv2d_NCHWc(const Variable &a, const Variable &b, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable depthwise_conv2d(const Variable &a, const Variable &b, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable pool2d(const Variable &a, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable batchnorm(const Variable &a, const Variable &scale, const Variable &bias, const Variable &mean, const Variable &variance, const absl::flat_hash_map<std::string, attr_t> &attr_store)

The batchnorm layer can be used as a normalizer function for convolution or fully_connected operations.

Return

The result.

Parameters
  • a: The input variable.

  • scale: The scale variable.

  • bias: The bias variable.

  • mean: The mean variable.

  • variance: The variance variable.

  • attr_store: The params like epsilon.
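
A sketch of a batchnorm call; the "epsilon" key name is an assumption based on the parameter note above:

  absl::flat_hash_map<std::string, cinn::frontend::Program::attr_t> bn_attrs;
  bn_attrs["epsilon"] = 1e-5f;  // assumed key name and value type
  auto out = program.batchnorm(x, scale, bias, mean, variance, bn_attrs);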

Variable fused_meta_batchnorm_inference(const Variable &a, const Variable &scale, const Variable &bias, const Variable &mean, const Variable &variance, const absl::flat_hash_map<std::string, attr_t> &attr_store)

Batchnorm composed of primitive ops.

Variable fused_batchnorm_inference(const Variable &a, const Variable &scale, const Variable &bias, const Variable &mean, const Variable &variance, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable scale(const Variable &a, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable softmax(const Variable &a, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable sigmoid(const Variable &a)
Variable slice(const Variable &a, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Variable dropout_infer(const Variable &a, const absl::flat_hash_map<std::string, attr_t> &attr_store)
Instruction &operator[](size_t i)

Get i-th instruction.

const Instruction &operator[](size_t i) const

Get i-th instruction.

size_t size() const

Get number of instructions in the program.

void Validate() const
void AppendInstruction(const Instruction &other)