    9i;"                         % S SK r S SKrS SKJr  S SKJr  S SKrS SKJrJ	r	   " S S5      r
S\S\
S	\4S
 jrS r\q\\S'   \ R                   S 5       r " S S5      rSS jrg)    N)Callable)
deprecated)KernelRegistrationHandlec                   x    \ rS rSrSrS\4S jr\S 5       r\R                  S 5       rSS.S	\
S
\S\4S jjrSrg)FakeImplHolder   z0A holder where one can register an fake impl to.qualnamec                     Xl         / U l        g N)r
   kernels)selfr
   s     X/var/www/html/land-doc-ocr/venv/lib/python3.13/site-packages/torch/_library/fake_impl.py__init__FakeImplHolder.__init__   s    % &(    c                 T    [        U R                  5      S:X  a  g U R                  S   $ )Nr   )lenr   )r   s    r   kernelFakeImplHolder.kernel   s%    t||!||Br   c                     [        S5      e)NzUnable to directly set kernel.RuntimeError)r   values     r   r   r      s    ;<<r   Fallow_overridefuncsourcereturnc                  ^ ^ U(       d  T R                   b0  [        ST R                   ST R                   R                   S35      e[        R
                  R                  T R                  S5      (       a  [        ST R                   S35      e[        R
                  R                  T R                  S5      (       a  [        ST R                   S35      e[        X5      mT R                  R                  T5        UU 4S jn[        T R                  T 5      nUR                  T R                  USUS	9  [        U5      nU$ )
zeRegister an fake impl.

Returns a RegistrationHandle that one can use to de-register this
fake impl.
z!register_fake(...): the operator z( already has an fake impl registered at .Metaz already has an DispatchKey::Meta implementation via a pre-existing torch.library or TORCH_LIBRARY registration. Please either remove that registration or don't call register_fake.CompositeImplicitAutograda%   already has an implementation for this device type via a pre-existing registration to DispatchKey::CompositeImplicitAutograd.CompositeImplicitAutograd operators do not need an fake impl; instead, the operator will decompose into its constituents and those can have fake impls defined on them.c                  <   > TR                   R                  T 5        g r   )r   remove)r   r   s   r   deregister_fake_kernel7FakeImplHolder.register.<locals>.deregister_fake_kernelN   s    LL'r   r   )r   r   r
   r   torch_C%_dispatch_has_kernel_for_dispatch_keyr   r   appendconstruct_meta_kernelimplr   )	r   r   r   libr   r'   meta_kernelhandler   s	   `       @r   registerFakeImplHolder.register"   s,    {{&"7 G>{{))*!- 
 xx==dmmVTT"7 G% &  xx==:  #7 G; <
 
 %F#	( ,DMM4@VNS#$:;r   )r   r
   N)__name__
__module____qualname____firstlineno____doc__strr   propertyr   setterr   r   r2   __static_attributes__ r   r   r   r      sf    :( (    
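# Usage sketch for FakeImplHolder.register (illustrative only: `my_lib` is
# assumed to be a torch.library.Library for the operator's namespace, and
# `my_fake_impl` and the source string are hypothetical):
#
#     holder = FakeImplHolder("mylib::my_op")
#     handle = holder.register(my_fake_impl, "my_module.py:12", my_lib)
#     ...
#     handle.destroy()  # runs deregister_fake_kernel, removing the fake impl

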
 ]]= = CH33&)3	3 3r   r   r
   fake_impl_holderr    c                    ^ ^ TR                   c   e[        R                  " TR                   R                  5      UU 4S j5       nU$ )Nc                     >^ TR                   c   eTR                   R                  mUU4S jn[        U5         TR                   " U 0 UD6sS S S 5        $ ! , (       d  f       g = f)Nc                  &   > [        T  ST S35      e)Nz (a  ): You're trying to run this operator with meta Tensors (as opposed to FakeTensors), but this operator may return an output Tensor with data-dependent shape. Meta Tensors don't support operators with outputs that have data-dependent shapes but FakeTensors do. If your operator does not return an output with data-dependent shape, make sure the FakeTensor and/or meta kernel does not call torch.library.get_ctx(). Otherwise, please use FakeTensors.r   )r
   r   s   r   error_on_ctx@construct_meta_kernel.<locals>.meta_kernel.<locals>.error_on_ctx`   s'    *Bvh 'N O	 	r   )r   r   set_ctx_getter)argskwargsrB   r   r>   r
   s      @r   r0   *construct_meta_kernel.<locals>.meta_kernel[   sT    &&222!((//
	 L)#**D;F; *))s   A
A$)r   	functoolswrapsr   )r
   r>   r0   s   `` r   r-   r-   X   sE    ""...__%,,112< 3<& r   c                      g r   r=   r=   r   r   get_nonerK   r   s    r   global_ctx_getterc              #   8   #    [         n U q S v   Uq g ! Uq f = f7fr   )rL   )
ctx_getterprevs     r   rD   rD   y   s&      D!& Ds    c                       \ rS rSrSrS r\" S\S9SSS.S	\R                  4S
 jj5       r
SSS.S	\R                  4S jjrSrg)FakeImplCtx   zG
Context object for writing fake implementations for custom operators.
c                 >    Xl         UR                  U l        X l        g r   )
_fake_mode	shape_env
_shape_env_op)r   rT   rW   s      r   r   FakeImplCtx.__init__   s    $$..r   zM`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead)category   Nminmaxr    c                     U R                  XS9$ Nr[   )new_dynamic_sizer   r\   r]   s      r   create_unbacked_symint"FakeImplCtx.create_unbacked_symint   s    
 $$$66r   r   c                   U R                   b  U R                   R                  (       d3  [        R                  R                  R                  U R                  5      e[        U[        R                  5      (       d  [        U[        R                  5      (       a  [        SU SU S35      eUS:  a  [        SU S35      e[        U R                   X5      $ )a  Constructs a new symint (symbolic int) representing a data-dependent value.

        This is useful for writing the fake implementation (which is necessary
        for torch.compile) for a CustomOp where an output Tensor has a size
        that depends on the data of the input Tensors.

        Args:
            min (int): A statically known inclusive lower bound for this symint. Default: 0
            max (Optional[int]): A statically known inclusive upper bound for this
                symint. Default: None

        .. warning::

            It is important that the ``min`` and ``max`` (if not None) values are set
            correctly; otherwise, there will be undefined behavior under
            torch.compile. Because torch.compile specializes on sizes 0/1, pass
            ``min=2`` (or higher) whenever the size is known to be at least 2.

            You must also verify that your implementation on concrete Tensors
            (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
            to the symint also respects these constraints.
            The easiest way to do this is to add an assertion in the CPU/CUDA/etc
            implementation that the size follows these bounds.

        Example::

            >>> # An operator with data-dependent output shape
            >>> lib = torch.library.Library("mymodule", "FRAGMENT")
            >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
            >>>
            >>> @torch.library.register_fake("mymodule::custom_nonzero")
            >>> def _(x):
            >>>     # Number of nonzero-elements is data-dependent.
            >>>     # Since we cannot peek at the data in a fake impl,
            >>>     # we use the ctx object to construct a new symint that
            >>>     # represents the data-dependent size.
            >>>     ctx = torch.library.get_ctx()
            >>>     nnz = ctx.new_dynamic_size()
            >>>     shape = [nnz, x.dim()]
            >>>     result = x.new_empty(shape, dtype=torch.int64)
            >>>     return result
            >>>
            >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
            >>> def _(x):
            >>>     x_np = x.numpy()
            >>>     res = np.stack(np.nonzero(x_np), axis=1)
            >>>     return torch.tensor(res, device=x.device)

zctx.new_dynamic_size(min=z, max=zZ): expected min and max to be statically known ints but got SymInt. This is not supported.r   zc, ...): expected min to be greater than or equal to 0: this API can only create non-negative sizes.)rV   allow_dynamic_output_shape_opsr)   _subclassesfake_tensorDynamicOutputShapeExceptionrW   
isinstanceSymInt
ValueErrorallocate_sizera   s      r   r`   FakeImplCtx.new_dynamic_size   s    f OO#??AA##//KKDHHUUc5<<((JsELL,I,I+C5se <) *  7+C5 1& '  T__c77r   )rT   rW   rV   )r4   r5   r6   r7   r8   r   r   FutureWarningr)   rj   rb   r`   r<   r=   r   r   rQ   rQ      s\    
 W -.4 7ELL 7	7 '(T F8ell F8 F8r   rQ   c                     U R                  5       n[        R                  R                  R                  R                  X1US9  U$ r_   )rb   r)   fxexperimentalsymbolic_shapes_constrain_range_for_size)rU   min_valmax_valresults       r   rl   rl      s@    --/F	HH))CC D  Mr   )r   N)
contextlibrH   typingr   typing_extensionsr   r)   torch._library.utilsr   r   r   r9   r-   rK   rL   __annotations__contextmanagerrD   rQ   rl   r=   r   r   <module>r}      s|       (  ;J JZC > h 4 ' 8 & ! !W8 W8tr   
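

# Sketch of allocate_size used in isolation (a standalone ShapeEnv is assumed
# here; in normal operation FakeTensorMode owns the ShapeEnv and this is
# reached via ctx.new_dynamic_size()):
#
#     from torch.fx.experimental.symbolic_shapes import ShapeEnv
#
#     env = ShapeEnv()
#     s = allocate_size(env, min_val=2, max_val=128)
#     # `s` is an unbacked SymInt: it has no concrete hint, but its recorded
#     # value range [2, 128] lets the compiler reason about it symbolically.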