nn.max_pool

fn max_pool(
    X: @Tensor<T>,
    auto_pad: Option<AUTO_PAD>,
    ceil_mode: Option<usize>,
    dilations: Option<Span<usize>>,
    kernel_shape: Span<usize>,
    pads: Option<Span<usize>>,
    storage_order: Option<usize>,
    strides: Option<Span<usize>>,
    output_len: usize,
) -> (Tensor<T>, Option<Tensor<usize>>);

MaxPool consumes an input tensor X and applies max pooling across it according to the kernel size, strides, dilations, and pad lengths. Max pooling consists of computing the maximum over all values of a subset of the input tensor selected by the kernel, and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed differently depending on whether explicit padding is used (via pads) or auto padding is used (via auto_pad).
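
As a reference, the standard ONNX MaxPool output-shape formulas, which this operator is modeled on (stated here as a summary of that convention, not a guarantee of the implementation), are:

  • Explicit padding: output_shape[i] = floor((input_shape[i] + pad_begin[i] + pad_end[i] - dilations[i] * (kernel_shape[i] - 1) - 1) / strides[i] + 1), with floor replaced by ceil when ceil_mode selects ceiling.

  • auto_pad set to SAME_UPPER or SAME_LOWER: output_shape[i] = ceil(input_shape[i] / strides[i]), with the pads chosen so that this holds.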

Args

  • X(@Tensor<T>) - Input data tensor from the previous operator; dimensions for the image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For the non-image case, the dimensions are of the form (N x C x D1 x D2 ... Dn), where N is the batch size.

  • auto_pad(Option<AUTO_PAD>) - Default is NOTSET. auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean the input is padded so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. VALID means no padding.

  • ceil_mode(Option<usize>) - Default is 1. Whether to use ceil (1) or floor (0) to compute the output shape.

  • dilations(Option<Span<usize>>) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis.

  • kernel_shape(Span<usize>) - The size of the kernel along each axis.

  • pads(Option<Span<usize>>) - Padding for the beginning and ending along each spatial axis; it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end of the corresponding axis. The pads format should be as follows: [x1_begin, x2_begin, ..., x1_end, x2_end, ...], where xi_begin is the number of pixels added at the beginning of axis i and xi_end the number of pixels added at the end of axis i. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along the start and end of each spatial axis. A worked output-shape example follows this list.

  • storage_order(Option<usize>) - Default is 0. The storage order of the tensor: 0 is row major, 1 is column major.

  • strides(Option<Span<usize>>) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.

  • output_len(usize) - Default is 1. If set to 2, the indices tensor is returned as well.
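
For instance, with the 1x1x5x5 input used in the Examples section below, a kernel_shape of [5, 5], pads of [2, 2, 2, 2], and the default strides and dilations of 1, the explicit-padding formula above gives floor((5 + 2 + 2 - 1 * (5 - 1) - 1) / 1 + 1) = 5 along each spatial axis, so the output is a 1x1x5x5 tensor.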

Returns

A Tensor<T> that contains the result of the max pool, and an Option<Tensor<usize>> with the indices of the maxima selected across the input tensor. The dimensions of the indices tensor are the same as those of the output tensor.

Examples

use orion::operators::nn::NNTrait;
use orion::numbers::FixedTrait;
use orion::operators::nn::FP16x16NN;
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};


fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
    // Build a 1x1x5x5 input tensor holding the values 1..25 in FP16x16
    // (mag = value * 65536).
    let mut shape = ArrayTrait::<usize>::new();
    shape.append(1);
    shape.append(1);
    shape.append(5);
    shape.append(5);
    let mut data = ArrayTrait::new();
    data.append(FP16x16 { mag: 65536, sign: false });
    data.append(FP16x16 { mag: 131072, sign: false });
    data.append(FP16x16 { mag: 196608, sign: false });
    data.append(FP16x16 { mag: 262144, sign: false });
    data.append(FP16x16 { mag: 327680, sign: false });
    data.append(FP16x16 { mag: 393216, sign: false });
    data.append(FP16x16 { mag: 458752, sign: false });
    data.append(FP16x16 { mag: 524288, sign: false });
    data.append(FP16x16 { mag: 589824, sign: false });
    data.append(FP16x16 { mag: 655360, sign: false });
    data.append(FP16x16 { mag: 720896, sign: false });
    data.append(FP16x16 { mag: 786432, sign: false });
    data.append(FP16x16 { mag: 851968, sign: false });
    data.append(FP16x16 { mag: 917504, sign: false });
    data.append(FP16x16 { mag: 983040, sign: false });
    data.append(FP16x16 { mag: 1048576, sign: false });
    data.append(FP16x16 { mag: 1114112, sign: false });
    data.append(FP16x16 { mag: 1179648, sign: false });
    data.append(FP16x16 { mag: 1245184, sign: false });
    data.append(FP16x16 { mag: 1310720, sign: false });
    data.append(FP16x16 { mag: 1376256, sign: false });
    data.append(FP16x16 { mag: 1441792, sign: false });
    data.append(FP16x16 { mag: 1507328, sign: false });
    data.append(FP16x16 { mag: 1572864, sign: false });
    data.append(FP16x16 { mag: 1638400, sign: false });
    let X = TensorTrait::new(shape.span(), data.span());

    // 2-D pooling: a 5x5 kernel with a padding of 2 on every side;
    // strides and dilations default to 1.
    return NNTrait::max_pool(
        @X,
        Option::None,
        Option::None,
        Option::None,
        array![5, 5].span(),
        Option::Some(array![2, 2, 2, 2].span()),
        Option::None,
        Option::None,
        1
    );
}

>>> ([
           [
               [
                   [13, 14, 15, 15, 15],
                   [18, 19, 20, 20, 20],
                   [23, 24, 25, 25, 25],
                   [23, 24, 25, 25, 25],
                   [23, 24, 25, 25, 25],
               ]
           ]
       ], 
       Option::None)
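
To also retrieve the indices of the selected maxima, the same call can be made with output_len set to 2. A minimal sketch, assuming (per the Returns section above) that the second element of the returned tuple is then populated:

// Identical to the call above, except output_len is 2 so the indices
// tensor is requested alongside the pooled values.
let (Y, indices) = NNTrait::max_pool(
    @X,
    Option::None,
    Option::None,
    Option::None,
    array![5, 5].span(),
    Option::Some(array![2, 2, 2, 2].span()),
    Option::None,
    Option::None,
    2
);
// `indices` is an Option<Tensor<usize>>; when populated, its tensor has
// the same dimensions as `Y`.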
