Ir.Low_levelmodule Scope_id : sig ... endval sexp_of_scope_id : scope_id -> Sexplib0.Sexp.tval hash_fold_scope_id :
Ppx_hash_lib.Std.Hash.state ->
scope_id ->
Ppx_hash_lib.Std.Hash.stateval hash_scope_id : scope_id -> Ppx_hash_lib.Std.Hash.hash_valuetype t = | Noop| Comment of Base.string| Staged_compilation of Base.unit -> PPrint.document| Seq of t * t| For_loop of {index : Indexing.symbol;from_ : Base.int;to_ : Base.int;body : t;trace_it : Base.bool;}| Zero_out of Tnode.t| Set of {tn : Tnode.t;idcs : Indexing.axis_index Base.array;llsc : scalar_t;mutable debug : Base.string;}| Set_from_vec of {tn : Tnode.t;idcs : Indexing.axis_index Base.array;length : Base.int;vec_unop : Ops.vec_unop;arg : scalar_arg;mutable debug : Base.string;}| Set_local of scope_id * scalar_tCases: t -- code, scalar_t -- single number at some precision.
and scalar_t = | Local_scope of {id : scope_id;body : t;orig_indices : Indexing.axis_index Base.array;}| Get_local of scope_id| Get of Tnode.t * Indexing.axis_index Base.array| Get_merge_buffer of Tnode.t * Indexing.axis_index Base.array| Ternop of Ops.ternop * scalar_arg * scalar_arg * scalar_arg| Binop of Ops.binop * scalar_arg * scalar_arg| Unop of Ops.unop * scalar_arg| Constant of Base.float| Constant_bits of Base.int64Direct bit representation, primarily for uint4x32
*)| Embed_index of Indexing.axis_indexThe argument precision is preserved in heterogeneous precision operation arguments, and is ignored (overridden) in homogeneous precision operations.
val sexp_of_t : t -> Sexplib0.Sexp.tval sexp_of_scalar_t : scalar_t -> Sexplib0.Sexp.tval sexp_of_scalar_arg : scalar_arg -> Sexplib0.Sexp.tval equal_scalar_arg : scalar_arg -> scalar_arg -> Base.boolval compare_scalar_arg : scalar_arg -> scalar_arg -> Base.intval loop_over_dims :
Base.int Base.array ->
body:(Indexing.axis_index Base.array -> t) ->
tval unroll_dims :
Base.int Base.array ->
body:(Indexing.axis_index Base.array -> offset:Base.int -> t) ->
tval virtualize_settings : virtualize_settingsval sexp_of_visits : visits -> Sexplib0.Sexp.tval visits_of_sexp : Sexplib0.Sexp.t -> visitsval visits : Base.int -> visitsval recurrent : visitsval is_visits : visits -> Base.boolval is_recurrent : visits -> Base.boolval visits_val : visits -> Base.int Base.optionval recurrent_val : visits -> Base.unit Base.optionmodule Variants_of_visits : sig ... endtype traced_array = {tn : Tnode.t;assignments : Base.int Base.array Base.Hash_set.t;accesses : (Base.int Base.array, visits) Base.Hashtbl.t;mutable zero_initialized_by_code : Base.bool;mutable zeroed_out : Base.bool;mutable read_before_write : Base.bool;The node is read before it is written (i.e. it is recurrent).
*)mutable read_only : Base.bool;Surprisingly, the notions of read-only and of constant memory mode come apart: small hosted constants are not read-only because they are initialized on devices by being assigned to; and a volatile memory mode is read-only from the devices' perspective.
*)mutable is_scalar_constexpr : Base.bool;True only if the tensor node has all axes of dimension 1, is either zeroed-out or assigned before accessed, is assigned at most once, and from an expression involving only constants or tensor nodes that were at the time is_scalar_constexpr.
*)mutable is_accessing : Base.bool;False only if the tensor node is built from index embeddings and scalar constant expressions.
*)mutable is_complex : Base.bool;True only if the tensor node is built accessing computations that are not a single getter.
*)}val sexp_of_traced_array : traced_array -> Sexplib0.Sexp.tval get_node :
(Tnode.t, traced_array) Base.Hashtbl.t ->
Tnode.t ->
traced_arraytype traced_store = (Tnode.t, traced_array) Base.Hashtbl.tval sexp_of_traced_store : traced_store -> Sexplib0.Sexp.ttype optimize_ctx = {computations : (Tnode.t,
(Indexing.axis_index Base.array Base.option * t) Base.list)
Base.Hashtbl.t;The computations (of the tensor node) are retrieved for optimization just as they are populated, so that the inlined code corresponds precisely to the changes to the arrays that would happen up till that point. Within the code blocks paired with an index tuple, all assignments and accesses must happen via the index tuple; if this is not the case for some assignment, the node cannot be virtual. Currently, we only allow for-loop symbols in assignment indices of virtual nodes.
*)}val sexp_of_optimize_ctx : optimize_ctx -> Sexplib0.Sexp.ttype optimized = {traced_store : traced_store;optimize_ctx : optimize_ctx;llc : t;merge_node : Tnode.t Base.option;}val sexp_of_optimized : optimized -> Sexplib0.Sexp.tval optimize :
optimize_ctx ->
unoptim_ll_source:(PPrint.document -> Base.unit) Base.option ->
ll_source:(PPrint.document -> Base.unit) Base.option ->
name:Base.string ->
Indexing.static_symbol Base.list ->
t ->
optimizedval input_and_output_nodes :
optimized ->
(Base.Set.M(Ir.Tnode).t * Base.Set.M(Ir.Tnode).t) * Tnode.t Base.optionInputs are the materialized read-only and read-before-write (within the code) non-constant non-merge nodes. They are inputs in a broad sense, as they could be recurrent nodes or parameters. Outputs are all the materialized nodes written to by the code. The last returned component is the input merge node, if used in the code.
val function_header_doc :
?name:Base.string ->
?static_indices:Indexing.static_symbol Base.list ->
Base.unit ->
PPrint.documentval to_doc_cstyle :
?name:Base.string ->
?static_indices:Indexing.static_symbol Base.list ->
Base.unit ->
t ->
PPrint.documentAdheres more to the C syntax, outputs implicit type casts.
val to_doc :
?name:Base.string ->
?static_indices:Indexing.static_symbol Base.list ->
Base.unit ->
t ->
PPrint.documentAdheres to the %cd syntax.