Ocannl_tensor.Operation
Computational primitives for neural networks, integrating Tensor with Assignments.
module Asgns = Ir.Assignments
module Idx = Ir.Indexing
module Tn = Ir.Tnode
module At : sig ... end
module Initial_NTDSL : sig ... end
val add : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val sub : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val mul :
Shape.compose_type ->
op_asn:
(v:Tensor.tn ->
t1:Tensor.t ->
t2:Tensor.t ->
projections:Tensor.projections ->
Tensor.comp) ->
?op_label:Base.string ->
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
val pointmul :
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
val matmul :
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
module NDO_before_pow : sig ... end
val is_prohibit_grad : Tensor.grad_spec option -> bool
val pointpow :
?grad_spec:Tensor.grad_spec ->
Base.float ->
Tensor.t ->
Tensor.op_fun
module NDO_before_div : sig ... end
module NTDSL_before_div : sig ... end
val pointdiv :
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
val relu : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sat01 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val exp : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val exp2 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val log2 : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sin : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val cos : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val sqrt : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val recip : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val recip_sqrt : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val tanh : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val neg : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val not : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.op_fun
val uint4x32_to_prec_uniform :
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
?label:Base.string Base.list ->
?top_down_prec:Base.bool ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
Tensor.param_op_fun
val uint4x32_to_prec_uniform1 :
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
?label:Base.string Base.list ->
?top_down_prec:Base.bool ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
Tensor.param_op_fun
val lt : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val eq : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val ne : ?grad_spec:Tensor.grad_spec -> Tensor.t -> Tensor.t -> Tensor.op_fun
val threefry4x32_crypto :
Tensor.t ->
Tensor.t ->
?grad_spec:Tensor.grad_spec ->
?label:Base.string Base.list ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_dims:Base.int Base.list ->
?output_dims:Base.int Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
unit ->
Tensor.t
val threefry4x32_light :
Tensor.t ->
Tensor.t ->
?grad_spec:Tensor.grad_spec ->
?label:Base.string Base.list ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_dims:Base.int Base.list ->
?output_dims:Base.int Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
unit ->
Tensor.t
val threefry4x32 :
Tensor.t ->
Tensor.t ->
?grad_spec:Tensor.grad_spec ->
?label:Base.string Base.list ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_dims:Base.int Base.list ->
?output_dims:Base.int Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
unit ->
Tensor.t
val fma :
grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
val where :
grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
val einsum :
?capture_dims:Shape.delayed_var_ref Base.list ->
Base.string ->
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
Similar to the explicit mode of numpy.einsum, the binary variant. Can compute various forms of matrix multiplication, inner and outer products, etc.
Note that "a,b->c" from numpy is "a;b=>c" in OCANNL, since "->" is used to separate the input and the output axes.
val outer_sum :
?capture_dims:Shape.delayed_var_ref Base.list ->
Base.string ->
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
Like einsum, but adds instead of multiplying the resulting values.
val einsum1 :
?capture_dims:Shape.delayed_var_ref Base.list ->
Base.string ->
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.op_fun
Similar to the explicit mode of numpy.einsum, the unary variant. Can permute axes, extract diagonals, compute traces etc.
Note that "a->c" from numpy is "a=>c" in OCANNL, since "->" is used to separate the input and the output axes.
module NDO_before_einmax1 : sig ... end
val einmax1 :
?capture_dims:Shape.delayed_var_ref Base.list ->
Base.string ->
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.op_fun
val tropical :
?capture_dims:Shape.delayed_var_ref Base.list ->
Base.string ->
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.t ->
Tensor.op_fun
This generalizes the tropical matrix multiplication to arbitrary combinations of indices.
val offsets : ?grad_spec:Tensor.grad_spec -> Tensor.op_fun
A fully-shape-inferred tensor that is initialized with the offset of each cell.
val range :
?label:Base.string list ->
?grad_spec:Tensor.grad_spec ->
?axis_label:Base.string ->
Base__Int.t ->
Tensor.t
range is a 1D tensor of shape upto; it spans 0 inclusive to upto exclusive.
val range_of_shape :
?label:Base.string list ->
?grad_spec:Tensor.grad_spec ->
?batch_dims:Base.Int.t Base.List.t ->
?input_dims:Base.Int.t Base.List.t ->
?output_dims:Base.Int.t Base.List.t ->
?batch_axes:(Base.string * Base.Int.t) Base.List.t ->
?input_axes:(Base.string * Base.Int.t) Base.List.t ->
?output_axes:(Base.string * Base.Int.t) Base.List.t ->
unit ->
Tensor.t
val stop_gradient : Tensor.t -> Tensor.op_fun
A stop_gradient is an identity in the forward pass and a no-op in the backprop pass.
val slice :
Idx.static_symbol ->
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
Tensor.op_fun
val embed_symbol :
?grad_spec:Tensor.grad_spec ->
?label:Base.string list ->
Ir.Indexing.static_symbol ->
Tensor.t
val embed_self_id :
?grad_spec:Tensor.grad_spec ->
?label:Base.string list ->
unit ->
Tensor.t
val embed_dim :
?grad_spec:Tensor.grad_spec ->
?label:Base.string list ->
Shape.delayed_var_ref ->
Tensor.t
val uniform :
?grad_spec:Tensor.grad_spec ->
unit ->
?label:Base.string Base.list ->
?top_down_prec:Base.bool ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
Tensor.param_op_fun
val uniform_at :
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
?label:Base.string Base.list ->
?top_down_prec:Base.bool ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
Tensor.param_op_fun
Generates a single uniform random number using a counter symbol for PRNG state. This is useful for sequential sampling in recurrent contexts.
val uniform1 :
?grad_spec:Tensor.grad_spec ->
unit ->
?label:Base.string Base.list ->
?top_down_prec:Base.bool ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
Tensor.param_op_fun
A wasteful variant of uniform that produces a single value from each 4x32 random bits.
val uniform_at1 :
?grad_spec:Tensor.grad_spec ->
Tensor.t ->
?label:Base.string Base.list ->
?top_down_prec:Base.bool ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
Tensor.param_op_fun
A wasteful variant of uniform_at that produces a single value from each 4x32 random bits.
val reshape :
l:Base.string ->
?b:Base.int Base.list ->
?i:Base.int Base.list ->
?o:Base.int Base.list ->
Ir.Ndarray.t ->
?fetch_op:Tensor.fetch_op ->
?grad_spec:Tensor.grad_spec ->
?top_down_prec:Base.bool ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
Base.unit ->
Tensor.t
The input i dimensions default to empty. The batch and output dimensions will be inferred if omitted. Note: the data should have no padding and if padding is inferred, the data will be copied; otherwise, the resulting tensor value shares host memory with the ndarray.
val wrap :
l:Base.string ->
?b:Base.int Base.list ->
?i:Base.int Base.list ->
?o:Base.int Base.list ->
Ir.Ndarray.t ->
?fetch_op:Tensor.fetch_op ->
?grad_spec:Tensor.grad_spec ->
?top_down_prec:Base.bool ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
Base.unit ->
Tensor.t
The dimensions are taken from the provided ndarray, but the split into axis kinds still needs to be inferred (or provided). Assumes no padding. Input axes are not inferred (empty if omitted). See also: reshape and TDSL.wrap_param.
val wrap_padded :
l:Base.string ->
?b:Base.int Base.list ->
?i:Base.int Base.list ->
?o:Base.int Base.list ->
padding:Ir.Ops.axis_padding Base.array ->
padded_value:Base.float ->
Ir.Assignments.Nd.t ->
?fetch_op:Tensor.fetch_op ->
?grad_spec:Tensor.grad_spec ->
?top_down_prec:Base.bool ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
Base.unit ->
Tensor.t
Assumes the ndarray is padded as given. This means the dimensions of the ndarray will differ from the dimensions of the tensor by the padding. See also: TDSL.wrap.
val rebatch :
l:Base.string ->
Ir.Ndarray.t ->
?fetch_op:Tensor.fetch_op ->
?grad_spec:Tensor.grad_spec ->
?top_down_prec:Base.bool ->
?batch_dims:Base.int Base.list ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
Base.unit ->
Tensor.t
The output dimensions are taken from the provided ndarray, assuming precisely the first axis is a batch axis; assumes no input axes, and the batch dimensions are inferred. Empty output dimensions are allowed and represent scalars. Assumes the data has no padding, and data is copied if padding is inferred. See also: reshape and wrap.
val init :
l:Base.string ->
prec:Ir.Ops.prec ->
?b:Base.int Base__List.t ->
?i:Base.int Base__List.t ->
?o:Base.int Base__List.t ->
f:(Base.int Base.array -> Base.float) ->
?fetch_op:Tensor.fetch_op ->
?grad_spec:Tensor.grad_spec ->
?top_down_prec:Base.bool ->
?batch_axes:(Base.string * Base.int) Base.list ->
?input_axes:(Base.string * Base.int) Base.list ->
?output_axes:(Base.string * Base.int) Base.list ->
?deduced:Shape.deduce_within_shape ->
Base.unit ->
Tensor.t
Creates a tensor by initializing values using a function from indices to values. The dimensions are split into axis kinds as specified; there is no shape inference. Recall that input axes are rightmost.
module DSL_modules : sig ... end