
from typing import Any, Dict

import torch
from torchvision import transforms

from modelscope.utils.constant import ModeKeys
from .base import OfaBasePreprocessor


class OfaImageCaptioningPreprocessor(OfaBasePreprocessor):
    r"""
    OFA preprocessor for the image captioning task.
    """

    def __init__(self, cfg, model_dir, mode=ModeKeys.INFERENCE, *args,
                 **kwargs):
        """preprocess the data

        Args:
            cfg(modelscope.utils.config.ConfigDict) : model config
            model_dir (str): model path,
            mode: preprocessor mode (model mode)
        """
        super(OfaImageCaptioningPreprocessor,
              self).__init__(cfg, model_dir, mode, *args, **kwargs)
        # Convert the input to RGB, resize it to the patch size expected by the
        # model, then turn it into a normalized tensor.
        self.patch_resize_transform = transforms.Compose([
            lambda image: image.convert('RGB'),
            transforms.Resize(
                (self.patch_image_size, self.patch_image_size),
                interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize(mean=self.mean, std=self.std),
        ])

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        if self.mode == ModeKeys.TRAIN:
            return self._build_train_sample(data)
        else:
            return self._build_infer_sample(data)

    def _build_train_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
        r"""
        Build a training sample.

        step 1. Preprocess the data using the logic of `_build_infer_sample`
            and make sure the label data is in the result.
        step 2. Preprocess the label data:
            - remove tokens within `!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~` and strip.
            - tokenize the label as the `target` value without the `bos` token.
            - add the `bos` token and remove the `eos` token of `target` to get
              `prev_output_tokens`.

        Args:
            data (`Dict[str, Any]`): Input data, should contain the keys `image`, `prompt`
                and `label`. `image` refers to the image input data, `prompt` refers to the
                text input data and `label` is the supervised data for training.
        Return:
            A dict object, contains source, image, mask, label, target tokens,
            and previous output tokens data.
        """
        sample = self._build_infer_sample(data)
        target = sample['label']
        # Strip punctuation and truncate the caption to the maximum target length.
        target = target.translate(self.transtab).strip()
        target_token_list = target.strip().split()
        target = ' '.join(target_token_list[:self.max_tgt_length])
        sample['target'] = self.tokenize_text(target, add_bos=False)
        # Teacher-forcing input: prepend `bos` and drop the final (`eos`) token.
        sample['prev_output_tokens'] = torch.cat(
            [self.bos_item, sample['target'][:-1]])
        return sample

    def _build_infer_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
        r"""
        Build an inference sample.

        step 1. Get the pillow image.
        step 2. Apply transforms to the pillow image to form the image input,
            such as resize, normalize, to tensor etc.
        step 3. Tokenize the prompt as the text input.
        step 4. Determine whether or not to add a label to the sample.

        Args:
            data (`Dict[str, Any]`): Input data, should contain the keys `image` and `prompt`;
                the former refers to the image input data, and the latter to the text input data.
        Return:
            A dict object, contains source, image, mask and label data.
        """
        image = self.get_img_pil(data[self.column_map['image']])
        patch_image = self.patch_resize_transform(image)
        prompt = self.cfg.model.get('prompt', ' what does the image describe?')
        inputs = self.tokenize_text(prompt)
        sample = {
            'source': inputs,
            'patch_image': patch_image,
            'patch_mask': torch.tensor([True])
        }
        if 'label' in self.column_map and self.column_map['label'] in data:
            sample['label'] = data[self.column_map['label']]
        return sample
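
# Usage sketch (illustrative only): the commented snippet below shows how this
# preprocessor is typically constructed and called for inference. The model
# directory, configuration file name and image path are assumptions for the
# example, not part of this module.
#
#   from modelscope.utils.config import Config
#   from modelscope.utils.constant import ModeKeys
#
#   model_dir = '/path/to/ofa_image-caption_model'        # hypothetical path
#   cfg = Config.from_file(f'{model_dir}/configuration.json')
#   preprocessor = OfaImageCaptioningPreprocessor(
#       cfg, model_dir, mode=ModeKeys.INFERENCE)
#   sample = preprocessor({'image': 'example.jpg'})        # hypothetical image
#   # `sample` holds the tokenized prompt ('source'), the resized and
#   # normalized image tensor ('patch_image') and the mask ('patch_mask').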