Module Ocannl_tensor.Operation

Computational primitives for neural networks, integrating Tensor with Assignments.

module Asgns = Ir.Assignments
module Idx = Ir.Indexing
module Tn = Ir.Tnode
val _get_local_debug_runtime : unit -> (module Minidebug_runtime.Debug_runtime)
val grad : Tensor.t -> Tensor.tn
module At : sig ... end
module Initial_NTDSL : sig ... end
val add : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val sub : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val mul : Shape.compose_type -> op_asn: (v:Tensor.tn -> t1:Tensor.t -> t2:Tensor.t -> projections:Tensor.projections -> Tensor.comp) -> ?op_label:Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val pointmul : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val matmul : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
module NDO_before_pow : sig ... end
val is_prohibit_grad : Tensor.grad_spec option -> bool
val pointpow : ?grad_spec:Tensor.grad_spec -> Base.float -> Tensor.t -> Tensor.op_fun
module NDO_before_div : sig ... end
module NTDSL_before_div : sig ... end
val pointdiv : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val relu : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sat01 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val exp : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log_2 : Base.Float.t
val exp2 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log2 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sin : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val cos : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sqrt : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val recip : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val recip_sqrt : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val tanh : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val neg : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val not : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val uint4x32_to_prec_uniform : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun
val uint4x32_to_prec_uniform1 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun
val lt : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val eq : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val ne : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val threefry4x32_crypto : Tensor.t -> Tensor.t -> ?grad_spec:Tensor.grad_spec -> ?label:Base.string Base.list -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_dims:Base.int Base.list -> ?output_dims:Base.int Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> unit -> Tensor.t
val threefry4x32_light : Tensor.t -> Tensor.t -> ?grad_spec:Tensor.grad_spec -> ?label:Base.string Base.list -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_dims:Base.int Base.list -> ?output_dims:Base.int Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> unit -> Tensor.t
val threefry4x32 : Tensor.t -> Tensor.t -> ?grad_spec:Tensor.grad_spec -> ?label:Base.string Base.list -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_dims:Base.int Base.list -> ?output_dims:Base.int Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> unit -> Tensor.t
val fma : grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.t -> Tensor.op_fun
val where : grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.t -> Tensor.op_fun
val einsum : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun

The binary variant, similar to the explicit mode of numpy.einsum. It can compute various forms of matrix multiplication, inner and outer products, etc.

Note that "a,b->c" from numpy is "a;b=>c" in OCANNL, since "->" is used to separate the input and the output axes.

val outer_sum : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun

Like einsum, but adds the resulting values instead of multiplying them.
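
For example (under the same hedged assumptions as the einsum sketch above), with two vectors this yields a broadcast sum at each output cell:

    (* d.[i; j] = a.[i] + b.[j] rather than a product. *)
    let d = outer_sum "i;j=>ij" a b ()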

val einsum1 : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun

The unary variant, similar to the explicit mode of numpy.einsum. It can permute axes, extract diagonals, compute traces, etc.

Note that "a->c" from numpy is "a=>c" in OCANNL, since "->" is used to separate the input and the output axes.

module NDO_before_einmax1 : sig ... end
val einmax1 : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val tropical : ?capture_dims:Shape.delayed_var_ref Base.list -> Base.string -> ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun

This generalizes tropical matrix multiplication to arbitrary index combinations.
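
A hedged sketch of the matrix-multiplication-shaped case, under the same assumptions as the einsum example: operand values are combined additively and reduced with an extremum instead of a sum (the tropical semiring), following whichever min/max convention the implementation uses.

    (* Tropical analogue of matrix multiplication over axes i, j, k. *)
    let c2 = tropical "ij;jk=>ik" a b ()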

val offsets : ?grad_spec:Tensor.grad_spec -> Tensor.op_fun

A fully-shape-inferred tensor that is initialized with the offset of each cell.

val range : ?label:Base.string list -> ?grad_spec:Tensor.grad_spec -> ?axis_label:Base.string -> Base__Int.t -> Tensor.t

range upto is a 1D tensor of shape upto, spanning 0 inclusive to upto exclusive.
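
For example, a minimal sketch:

    (* A 1D tensor holding the values 0., 1., 2., 3., 4. *)
    let r = range 5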

val range_of_shape : ?label:Base.string list -> ?grad_spec:Tensor.grad_spec -> ?batch_dims:Base.Int.t Base.List.t -> ?input_dims:Base.Int.t Base.List.t -> ?output_dims:Base.Int.t Base.List.t -> ?batch_axes:(Base.string * Base.Int.t) Base.List.t -> ?input_axes:(Base.string * Base.Int.t) Base.List.t -> ?output_axes:(Base.string * Base.Int.t) Base.List.t -> unit -> Tensor.t
val stop_gradient : Tensor.t -> Tensor.op_fun

A stop_gradient is an identity in the forward pass and a no-op in the backprop pass.
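
A hedged sketch of typical use, assuming t is a Tensor.t in scope and that the resulting Tensor.op_fun is finalized with ():

    (* The value of t passes through unchanged; no gradient flows back into t. *)
    let detached = stop_gradient t ()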

val embed_symbol : ?grad_spec:Tensor.grad_spec -> ?label:Base.string list -> Ir.Indexing.static_symbol -> Tensor.t
val embed_self_id : ?grad_spec:Tensor.grad_spec -> ?label:Base.string list -> unit -> Tensor.t
val embed_dim : ?grad_spec:Tensor.grad_spec -> ?label:Base.string list -> Shape.delayed_var_ref -> Tensor.t
val uniform : ?grad_spec:Tensor.grad_spec -> unit -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun
val uniform_at : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun

Generates a single uniform random number using a counter symbol for PRNG state. This is useful for sequential sampling in recurrent contexts.

val uniform1 : ?grad_spec:Tensor.grad_spec -> unit -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun

A wasteful variant of uniform that produces a single value from each block of 4x32 random bits.

val uniform_at1 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> ?label:Base.string Base.list -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> Tensor.param_op_fun

A wasteful variant of uniform_at that produces a single value from each block of 4x32 random bits.

val reshape : l:Base.string -> ?b:Base.int Base.list -> ?i:Base.int Base.list -> ?o:Base.int Base.list -> Ir.Ndarray.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

The input (i) dimensions default to empty. The batch and output dimensions are inferred if omitted. Note: the data should have no padding; if padding is inferred, the data is copied, otherwise the resulting tensor value shares host memory with the ndarray.
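
A hedged sketch, assuming nd : Ir.Ndarray.t holds 6 unpadded cells and was obtained elsewhere:

    (* Reinterpret the 6 cells as a 2x3 output-axes tensor; axis kinds not
       given here are inferred. *)
    let t = reshape ~l:"x" ~o:[ 2; 3 ] nd ()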

val wrap : l:Base.string -> ?b:Base.int Base.list -> ?i:Base.int Base.list -> ?o:Base.int Base.list -> Ir.Ndarray.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

The dimensions are taken from the provided ndarray, but the split into axis kinds still needs to be inferred (or provided). Assumes no padding. Input axes are not inferred (empty if omitted). See also: reshape and TDSL.wrap_param.

val wrap_padded : l:Base.string -> ?b:Base.int Base.list -> ?i:Base.int Base.list -> ?o:Base.int Base.list -> padding:Ir.Ops.axis_padding Base.array -> padded_value:Base.float -> Ir.Assignments.Nd.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

Assumes the ndarray is padded as given. This means the dimensions of the ndarray will differ from the dimensions of the tensor by the padding. See also: TDSL.wrap.

val rebatch : l:Base.string -> Ir.Ndarray.t -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_dims:Base.int Base.list -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

The output dimensions are taken from the provided ndarray, assuming precisely the first axis is a batch axis; there are no input axes, and the batch dimensions are inferred. Empty output dimensions are allowed and represent scalars. Assumes the data has no padding; the data is copied if padding is inferred. See also: reshape and wrap.

val init : l:Base.string -> prec:Ir.Ops.prec -> ?b:Base.int Base__List.t -> ?i:Base.int Base__List.t -> ?o:Base.int Base__List.t -> f:(Base.int Base.array -> Base.float) -> ?fetch_op:Tensor.fetch_op -> ?grad_spec:Tensor.grad_spec -> ?top_down_prec:Base.bool -> ?batch_axes:(Base.string * Base.int) Base.list -> ?input_axes:(Base.string * Base.int) Base.list -> ?output_axes:(Base.string * Base.int) Base.list -> ?deduced:Shape.deduce_within_shape -> Base.unit -> Tensor.t

Creates a tensor by initializing values using a function from indices to values. The dimensions are split into axis kinds as specified; there is no shape inference. Recall that input axes are rightmost.
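
A hedged sketch, assuming prec is a value of type Ir.Ops.prec obtained elsewhere; the callback receives the full index array, with input axes rightmost:

    (* A 3x4 output-axes tensor whose cell at index [| i; j |] holds i + j. *)
    let t =
      init ~l:"ramp" ~prec ~o:[ 3; 4 ]
        ~f:(fun idx -> Float.of_int (idx.(0) + idx.(1)))
        ()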

module Make_DSL (Grad_spec : sig ... end) : sig ... end
module DSL_modules : sig ... end