
from collections.abc import Mapping
from typing import Any, Union

import torch.optim as optim
from torch import Tensor
from torch.distributed._shard.sharded_tensor import ShardedTensor


class ShardedOptimizer(optim.Optimizer):
    def __init__(
        self,
        named_params: Mapping[str, Union[Tensor, ShardedTensor]],
        optimizer_class,
        *optimizer_args,
        **optimizer_kwargs,
    ):
        """
        ShardedOptimizer collects all tensors and local shard tensors of
        ShardedTensor, then uses these tensors as ``params`` for the wrapped optimizer.

        Args:
            named_params (Dict[str, Union[Tensor, ShardedTensor]]): a Dict
                of parameters, where key is the parameter key, value is either
                Tensor or ShardedTensor parameter.
            optimizer_class (torch.optim.Optimizer): the Optimizer to use
                locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc.
            *optimizer_args: the arguments to initialize the optimizer.
            **optimizer_kwargs: the key-word arguments to initialize the optimizer.
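
        Example (an illustrative sketch, not part of the original docstring;
        it assumes ``sharded_module`` is a module whose named parameters
        include ShardedTensors)::

            >>> # xdoctest: +SKIP
            >>> from torch.distributed._shard.sharded_optim import (
            ...     named_params_with_sharded_tensor,
            ... )
            >>> import torch.optim as optim
            >>> sharded_optim = ShardedOptimizer(
            ...     dict(named_params_with_sharded_tensor(sharded_module)),
            ...     optim.SGD,  # optimizer class applied to the collected tensors
            ...     lr=0.01,    # forwarded to optim.SGD as a kwarg
            ... )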

        c              3   4   K   | ]  }|j                     y wN)tensor).0local_shards     Z/home/dcms/DCMS/lib/python3.12/site-packages/torch/distributed/_shard/sharded_optim/api.py	<genexpr>z,ShardedOptimizer.__init__.<locals>.<genexpr>#   s      +6K&&s   N)
values
isinstancer   extendlocal_shardsappendr
   _optimparam_groupsstate)selfr
   optimizer_classoptimizer_argsoptimizer_kwargstensorsvalues          r   __init__zShardedOptimizer.__init__   s    * !#!((* 	&E%/ :?:L:L:N  u%	& )%gSSBRS KK44[[&&

    def zero_grad(self, set_to_none: bool = True):
        r"""Resets the gradients of all optimized :class:`torch.Tensor` s.

        Args:
            set_to_none (bool): instead of setting to zero, set the grads to None.
                This will in general have lower memory footprint, and can modestly improve performance.
                However, it changes certain behaviors. For example:
                1. When the user tries to access a gradient and perform manual ops on it,
                a None attribute or a Tensor full of 0s will behave differently.
                2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
                are guaranteed to be None for params that did not receive a gradient.
                3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
                (in one case it does the step with a gradient of 0 and in the other it skips
                the step altogether).
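
        Example (an illustrative addition, not from the original docstring;
        ``opt`` stands for any constructed ShardedOptimizer)::

            >>> # xdoctest: +SKIP
            >>> opt.zero_grad(set_to_none=True)
            >>> # params that receive no gradient in the next backward pass
            >>> # will keep ``.grad is None`` rather than a Tensor of zeros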
        N)r   	zero_grad)r   r#   s     r   r%   zShardedOptimizer.zero_grad.   s     	k*r"   Nc                 :    | j                   j                  |       y)af  Performs a single optimization step (parameter update).

        Args:
            closure (Callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.

        .. note::
            Unless otherwise specified, this function should not modify the
            ``.grad`` field of the parameters.
        N)r   step)r   closures     r   r'   zShardedOptimizer.step?   s     	!r"   returnc                     t        d      )z
        Returned state and param_groups will contain parameter keys
        instead of parameter indices like torch.optim.Optimizer.
        This allows for advanced functionality like optimizer re-sharding to be implemented.
        """
        raise NotImplementedError("ShardedOptimizer state_dict not implemented yet!")

    def load_state_dict(self, state_dict: Mapping[str, Any]):
        r"""Loads the ShardedOptimizer state.

        Args:
            state_dict (dict): ShardedOptimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        raise NotImplementedError(
            "ShardedOptimizer load_state_dict not implemented yet!"
        )

    def add_param_group(self, param_group: Any):
        r"""Add a new param group"""
        raise NotImplementedError(
            "ShardedOptimizer add_param_group not implemented yet!"
        )