Module Ocannl_tensor.Operation

Computational primitives for neural networks, integrating Tensor with Assignments.

module Asgns = Ir.Assignments
module Idx = Ir.Indexing
module Tn = Ir.Tnode
val _get_local_debug_runtime : unit -> (module Minidebug_runtime.Debug_runtime)
val grad : Tensor.t -> Tensor.tn
module At : sig ... end
module Initial_NTDSL : sig ... end
val compose_op_of_spec : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> unit -> Shape.compose_type

Helper to compute compose_op: if spec is provided, use Einsum; otherwise use Pointwise_bin.

val transpose_op_of_spec : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> unit -> Shape.transpose_type

Helper to compute transpose_op: if spec is provided, use Permute; otherwise use Pointwise_un.

val add : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val sub : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val mul : Shape.compose_type -> op_asn: (t:Tensor.t -> t1:Tensor.t -> t2:Tensor.t -> projections:Tensor.projections -> Tensor.comp) -> ?op_label:Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val pointmul : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val matmul : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
module NDO_before_pow : sig ... end
val is_prohibit_grad : Tensor.grad_spec option -> bool
val pointpow : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Base.float -> Tensor.t -> Tensor.op_fun
module NDO_before_div : sig ... end
module NTDSL_before_div : sig ... end
val pointdiv : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val relu : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sat01 : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val exp : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log_2 : Base.Float.t
val exp2 : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log2 : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sin : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val cos : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sqrt : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val recip : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val recip_sqrt : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val tanh : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val neg : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val not : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val uint4x32_to_prec_uniform : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun
val uint4x32_to_prec_uniform1 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun
val lt : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val eq : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val ne : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val interleave : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val threefry4x32_crypto : Tensor.t -> Tensor.t -> ?grad_spec:Tensor.grad_spec -> ?label:Base.string Base.list -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_dims:Base.int Base.list -> ?output_dims:Base.int Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> unit -> Tensor.t
val threefry4x32_light : Tensor.t -> Tensor.t -> ?grad_spec:Tensor.grad_spec -> ?label:Base.string Base.list -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_dims:Base.int Base.list -> ?output_dims:Base.int Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> unit -> Tensor.t
val threefry4x32 : Tensor.t -> Tensor.t -> ?grad_spec:Tensor.grad_spec -> ?label:Base.string Base.list -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_dims:Base.int Base.list -> ?output_dims:Base.int Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> unit -> Tensor.t
val fma : grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.t -> Tensor.op_fun
val where : grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.t -> Tensor.op_fun
val einsum : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun

Similar to the explicit mode of numpy.einsum, the binary variant. Can compute various forms of matrix multiplication, inner and outer products, etc.

Note that "a,b->c" from numpy is "a;b=>c" in OCANNL, since "->" is used to separate the input and the output axes.

val outer_sum : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun

Like einsum, but adds instead of multiplying the resulting values.

val einsum1 : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun

Similar to the explicit mode of numpy.einsum, the unary variant. Can permute axes, extract diagonals, compute traces etc.

Note that "a->c" from numpy is "a=>c" in OCANNL, since "->" is used to separate the input and the output axes.

module NDO_before_einmax1 : sig ... end
val einmax1 : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val tropical : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun

This generalizes the tropical matrix multiplication to arbitrary indices combinations.

LIMITATION: Backpropagation is only correct when the RHS1 (t1) index space includes the RHS2 (t2) index space. This is the case for convolution-like operations where the kernel indices are contracted with strided input indices. For general tropical operations where RHS2 has independent indices, the g2 gradient will be incorrect.

val offsets : ?grad_spec:Tensor.grad_spec -> Tensor.op_fun

A fully-shape-inferred tensor that is initialized with the offset of each cell.

val range : ?label:Base.string list -> ?grad_spec:Tensor.grad_spec -> ?axis_label:Base.string -> Base__Int.t -> Tensor.t

range is a 1D tensor of shape upto; it spans values from 0 (inclusive) to upto (exclusive).

val range_of_shape : ?label:Base.string list -> ?grad_spec:Tensor.grad_spec -> ?batch_dims:Base.Int.t Base.List.t -> ?input_dims:Base.Int.t Base.List.t -> ?output_dims:Base.Int.t Base.List.t -> ?batch_axes:(Base.string * Base.Int.t) Base.List.t -> ?input_axes:(Base.string * Base.Int.t) Base.List.t -> ?output_axes:(Base.string * Base.Int.t) Base.List.t -> unit -> Tensor.t
val stop_gradient : ?spec:Base.string -> ?capture_dims:Shape.delayed_var_ref Base.list -> Tensor.t -> Tensor.op_fun

A stop_gradient is an identity in the forward pass and a no-op in the backprop pass.

val embed_symbol : ?grad_spec:Tensor.grad_spec -> ?label:Base.string list -> Ir.Indexing.static_symbol -> Tensor.t
val embed_self_id : ?grad_spec:Tensor.grad_spec -> ?label:Base.string list -> unit -> Tensor.t
val embed_dim : ?grad_spec:Tensor.grad_spec -> ?label:Base.string list -> Shape.delayed_var_ref -> Tensor.t
val uniform : ?grad_spec:Tensor.grad_spec -> unit -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun
val uniform_at : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun

Generates a single uniform random number using a counter symbol for PRNG state. This is useful for sequential sampling in recurrent contexts.

val uniform1 : ?grad_spec:Tensor.grad_spec -> unit -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun

A wasteful variant of uniform that produces a single value from each 4x32 random bits. The bit-spreading in int32_to_uint4x32/uint32_to_uint4x32 ensures good entropy even with the 2-round "light" threefry variant.

val uniform_at1 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun

A wasteful variant of uniform_at that produces a single value from each 4x32 random bits. The bit-spreading in int32_to_uint4x32/uint32_to_uint4x32 ensures good entropy even with the 2-round "light" threefry variant.

val reshape : l:Base.string -> ?b:Base.int Base.list -> ?i:Base.int Base.list -> ?o:Base.int Base.list -> Ir.Ndarray.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

The input i dimensions default to empty. The batch and output dimensions will be inferred if omitted. Note: the data should have no padding and if padding is inferred, the data will be copied; otherwise, the resulting tensor value shares host memory with the ndarray.

val wrap : l:Base.string -> ?b:Base.int Base.list -> ?i:Base.int Base.list -> ?o:Base.int Base.list -> Ir.Ndarray.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

The dimensions are taken from the provided ndarray, but the split into axis kinds still needs to be inferred (or provided). Assumes no padding. Input axes are not inferred (empty if omitted). See also: reshape and TDSL.wrap_param.

val wrap_padded : l:Base.string -> ?b:Base.int Base.list -> ?i:Base.int Base.list -> ?o:Base.int Base.list -> padding:Ir.Ops.axis_padding Base.array -> padded_value:Base.float -> Ir.Assignments.Nd.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

Assumes the ndarray is padded as given. This means the dimensions of the ndarray will differ from the dimensions of the tensor by the padding. See also: TDSL.wrap.

val rebatch : l:Base.string -> Ir.Ndarray.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

The output dimensions are taken from the provided ndarray, assuming precisely the first axis is a batch axis, assumes no input axes and the batch dimensions are inferred. Empty output dimensions are allowed and represent scalars. Assumes the data has no padding, and data is copied if padding is inferred. See also: reshape and wrap.

val init : l:Base.string -> prec:Ir.Ops.prec -> ?b:Base.int Base__List.t -> ?i:Base.int Base__List.t -> ?o:Base.int Base__List.t -> f:(Base.int Base.array -> Base.float) -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

Creates a tensor by initializing values using a function from indices to values. The dimensions are split into axis kinds as specified, there is no shape inference. Recall that input axes are rightmost.

module Make_DSL (Grad_spec : sig ... end) : sig ... end
module DSL_modules : sig ... end