
from torch import Tensor

from .adam import Adam, adam
from .optimizer import (
    _capturable_doc,
    _differentiable_doc,
    _foreach_doc,
    _fused_doc,
    _maximize_doc,
    _params_doc,
    ParamsT,
)

__all__ = ["AdamW", "adamw"]


class AdamW(Adam):
    def __init__(
        self,
        params: ParamsT,
        lr: float | Tensor = 1e-3,
        betas: tuple[float | Tensor, float | Tensor] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 1e-2,
        amsgrad: bool = False,
        *,
        maximize: bool = False,
        foreach: bool | None = None,
        capturable: bool = False,
        differentiable: bool = False,
        fused: bool | None = None,
    ) -> None:
        # AdamW is Adam with decoupled weight decay: delegate to Adam and
        # force decoupled_weight_decay=True.
        super().__init__(
            params, lr, betas, eps, weight_decay, amsgrad,
            foreach=foreach, maximize=maximize, capturable=capturable,
            differentiable=differentiable, fused=fused,
            decoupled_weight_decay=True,
        )

    def __setstate__(self, state):
        super().__setstate__(state)
        # Checkpoints saved by older AdamW versions must still load with
        # decoupled weight decay enabled.
        for group in self.param_groups:
            group["decoupled_weight_decay"] = True


AdamW.__doc__ = (
    r"""Implements AdamW algorithm, where weight decay does not accumulate in the momentum nor variance.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{(lr)}, \: \beta_1, \beta_2
                \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
                \: \epsilon \text{ (epsilon)}                                                    \\
            &\hspace{13mm}      \lambda \text{(weight decay)},  \: \textit{amsgrad},
                \: \textit{maximize}                                                             \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
                \text{(second moment)}, \: v_0^{max} \leftarrow 0                        \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}         \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm} v_t^{max} \leftarrow \mathrm{max}(v_{t-1}^{max},v_t)                  \\
            &\hspace{10mm}\widehat{v_t} \leftarrow v_t^{max}/\big(1-\beta_2^t \big)              \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                  \\
            &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
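
    As a minimal illustrative sketch (not the exact kernels, which are shared with
    :class:`torch.optim.Adam`), one decoupled update on pre-existing tensors ``param``
    and ``grad``, with state ``exp_avg``, ``exp_avg_sq`` at step count ``step`` and the
    hyper-parameters above, would read::

        bias_correction1 = 1 - beta1 ** step
        bias_correction2 = 1 - beta2 ** step
        param.mul_(1 - lr * weight_decay)                              # decoupled decay
        exp_avg.lerp_(grad, 1 - beta1)                                 # m_t
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)   # v_t
        denom = (exp_avg_sq / bias_correction2).sqrt_().add_(eps)      # sqrt(v_hat) + eps
        param.addcdiv_(exp_avg, denom, value=-lr / bias_correction1)   # theta_t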
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (tuple[Union[float, Tensor], Union[float, Tensor]], optional):
            coefficients used for computing running averages of gradient and
            its square. If a tensor is provided, it must be a 1-element tensor. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_maximize_doc}
        {_foreach_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. Note::
        A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
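
    Example (a minimal usage sketch; any small model works)::

        >>> import torch
        >>> model = torch.nn.Linear(4, 2)
        >>> optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
        >>> loss = model(torch.randn(8, 4)).sum()
        >>> loss.backward()
        >>> optimizer.step()
        >>> optimizer.zero_grad()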

    """
)


def adamw(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    foreach: bool | None = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: bool | None = None,
    grad_scale: Tensor | None = None,
    found_inf: Tensor | None = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float | Tensor,
    weight_decay: float,
    eps: float,
    maximize: bool,
) -> None:
    r"""Functional API that performs AdamW algorithm computation.

    See :class:`~torch.optim.AdamW` for details.
    """
    # Forward to the functional adam with decoupled_weight_decay=True.
    adam(
        params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
        foreach=foreach, capturable=capturable, differentiable=differentiable,
        fused=fused, grad_scale=grad_scale, found_inf=found_inf,
        has_complex=has_complex, amsgrad=amsgrad, beta1=beta1, beta2=beta2,
        lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize,
        decoupled_weight_decay=True,
    )
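

# A minimal sketch of driving the functional API directly (illustrative only; the
# state buffers below are hypothetical, and typical code constructs the AdamW class
# above instead):
#
#     import torch
#     param, grad = torch.zeros(3), torch.ones(3)
#     exp_avg, exp_avg_sq = torch.zeros(3), torch.zeros(3)
#     step = torch.tensor(0.0)  # incremented in place by the functional adam
#     adamw([param], [grad], [exp_avg], [exp_avg_sq], [], [step],
#           amsgrad=False, beta1=0.9, beta2=0.999, lr=1e-3,
#           weight_decay=1e-2, eps=1e-8, maximize=False)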