Class NetBuilder

Inheritance Relationships

Base Type

Class Documentation

class cinn::frontend::NetBuilder : public cinn::frontend::BaseBuilder

Public Functions

Variable identity(const Variable &a)
Variable add(const Variable &a, const Variable &b)

Add two variables.

Variable reshape(const Variable &a, const std::vector<int> &shape)

Reshape Variable.

Variable transpose(const Variable &a, const std::vector<int> &axis)

Transpose a matrix.

Variable mul(const Variable &a, const Variable &b, int x_num_col_dims = 1, int y_num_col_dims = 1)

Multiply two matrices.

Variable mulbias(const Variable &a, const Variable &b, const Variable &c, int x_num_col_dims = 1, int y_num_col_dims = 1)

Multiply two matrices and add a bias.

Variable elementwise_add(const Variable &a, const Variable &b, int axis = -1)

Add two tensors element-wise.

const std::vector<Variable> &elementwise_add_grad(const Variable &dout, const Variable &x, const Variable &y, int axis = -1)

The gradient of elementwise_add.

Variable elementwise_mul(const Variable &a, const Variable &b, int axis = -1)

Multiply two tensors element-wise.

Variable relu(const Variable &a)

Apply Rectified Linear Unit on input Variable. Actually applies: output = max(input, 0)

Variable relu_grad(const Variable &dout, const Variable &out)

The gradient of Rectified Linear Unit. Actually apply: dx = dout * (out > 0)

Variable relu6(const Variable &a, float threshold = 6.0f)
Variable reverse(const Variable &x, const std::vector<int> &axis)

This API reverses the Variable x along the given axis. Example 1: x = [[0, 1], [2, 3], [4, 5]], axis = [0] output = [[4, 5], [2, 3], [0, 1]] Example 2: x = [[0, 1], [2, 3], [4, 5]], axis = [0, 1] output = [[5, 4], [3, 2], [1, 0]]

Variable reduce_sum(const Variable &x, const std::vector<int> &dim, bool keep_dim = false)

Compute the sum of Variable x along the given dim.

Variable conv2d(const Variable &a, const Variable &b, const std::vector<int> &strides = {1, 1}, const std::vector<int> &paddings = {0, 0}, const std::vector<int> &dilations = {1, 1}, int groups = 1, const std::string &data_format = "NCHW", const std::string &padding_algorithm = "EXPLICIT")

The convolution2D layer calculates the output based on the input and filter, using the strides, paddings, dilations, and groups parameters.

Variable depthwise_conv2d(const Variable &a, const Variable &b, const std::vector<int> &strides = {1, 1}, const std::vector<int> &paddings = {0, 0}, const std::vector<int> &dilations = {1, 1}, int groups = 1, const std::string &data_format = "NCHW", const std::string &padding_algorithm = "EXPLICIT")
Variable pool2d(const Variable &a, const std::string &pooling_type, const std::vector<int> &ksize, const std::vector<int> &strides = {1, 1}, const std::vector<int> &paddings = {0, 0}, bool ceil_mode = false, bool exclusive = true, bool global_pooling = false, const std::string &data_format = "NCHW", bool adaptive = false, const std::string &padding_algorithm = "EXPLICIT")
std::vector<Variable> batchnorm(const Variable &a, const Variable &scale, const Variable &bias, const Variable &mean, const Variable &variance, float epsilon = 1e-5f, float momentum = 0.9f, const std::string &data_layout = "NCHW", bool is_test = false)

The batchnorm layer can be used as a normalizer function for convolution or fully_connected operations. is_test(true): batch norm infer (default), output={y} is_test(false): batch norm training, outputs={y, saved_mean, saved_variance, moving_mean, moving_variance}

std::vector<Variable> batch_norm_grad(const Variable &dy, const Variable &x, const Variable &scale, const Variable &save_mean, const Variable &save_variance, const float epsilon = 1e-5, const std::string &data_layout = "NCHW")
Variable scale(const Variable &a, float scale = 1.0f, float bias = 0.0f, bool bias_after_scale = true)
Variable softmax(const Variable &a, int axis = -1, const std::string &data_format = "AnyLayout")
Variable sigmoid(const Variable &a)
Variable slice(const Variable &a, const std::vector<int> &axes, const std::vector<int> &starts = {}, const std::vector<int> &ends = {}, const std::vector<int> &infer_flags = {}, const std::vector<int> &decrease_axis = {})
Variable dropout_infer(const Variable &a, float dropout_prob = 0.5f, const std::string &dropout_implementation = "downgrade_in_infer")
Variable sum(const std::vector<Variable> &inputs)
std::vector<Variable> conv2d_grad(const Variable &dy, const Variable &x, const Variable &w, const std::vector<int> &strides = {1, 1}, const std::vector<int> &paddings = {0, 0}, const std::vector<int> &dilations = {1, 1}, const int groups = 1, const std::string &data_format = "NCHW", const std::string &padding_algorithm = "EXPLICIT")