import functools
from typing import Any, Dict

import torch
from PIL import Image, ImageFile
from timm.data import create_transform
from torchvision import transforms

from modelscope.preprocessors.image import load_image
from modelscope.utils.constant import ModeKeys
from .base import OfaBasePreprocessor
from .utils.vision_helper import RandomAugment

# Be tolerant of truncated or very large images when decoding with PIL.
ImageFile.LOAD_TRUNCATED_IMAGES = True
ImageFile.MAX_IMAGE_PIXELS = None
Image.MAX_IMAGE_PIXELS = None


class OfaImageClassificationPreprocessor(OfaBasePreprocessor):
    r"""
    OFA preprocessor for the image classification task.
    """
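
    # Rough usage sketch (illustrative; the exact entry point is an assumption,
    # since this preprocessor is normally constructed by modelscope's pipeline
    # machinery from a downloaded model's configuration):
    #
    #     preprocessor = OfaImageClassificationPreprocessor(
    #         cfg, model_dir, mode=ModeKeys.INFERENCE)
    #     sample = preprocessor({'image': '/path/to/image.jpg'})
    #
    # Here `cfg` is the model's ConfigDict, `model_dir` is a local model
    # checkout, and the 'image' key assumes the default column mapping
    # provided by the base class.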

    def __init__(self,
                 cfg,
                 model_dir,
                 mode=ModeKeys.INFERENCE,
                 *args,
                 **kwargs):
        """Preprocess the data.

        Args:
            cfg (modelscope.utils.config.ConfigDict): model config
            model_dir (str): model path
            mode: preprocessor mode (model mode)
        """
        super(OfaImageClassificationPreprocessor,
              self).__init__(cfg, model_dir, mode, *args, **kwargs)
        if self.mode != ModeKeys.TRAIN:
            # Inference: deterministic resize + normalize only.
            self.patch_resize_transform = transforms.Compose([
                lambda image: image.convert('RGB'),
                transforms.Resize(
                    (self.patch_image_size, self.patch_image_size),
                    interpolation=transforms.InterpolationMode.BICUBIC),
                transforms.ToTensor(),
                transforms.Normalize(mean=self.mean, std=self.std),
            ])
        else:
            # Training: start from timm's standard augmentation pipeline.
            self.patch_resize_transform = create_transform(
                input_size=self.patch_image_size,
                is_training=True,
                color_jitter=0.4,
                auto_augment='rand-m9-mstd0.5-inc1',
                interpolation='bicubic',
                re_prob=0.25,
                re_mode='pixel',
                re_count=1,
                mean=self.mean,
                std=self.std)
            # Splice an extra PIL-level RandomAugment stage into the timm
            # pipeline: keep the leading PIL transforms, insert RandomAugment,
            # and leave the tensor-level steps (ToTensor / Normalize / random
            # erasing) at the end.
            self.patch_resize_transform = transforms.Compose(
                functools.reduce(lambda x, y: x + y, [
                    [lambda image: image.convert('RGB')],
                    self.patch_resize_transform.transforms[:2],
                    [self.patch_resize_transform.transforms[2]],
                    [
                        RandomAugment(
                            2,
                            7,
                            isPIL=True,
                            augs=[
                                'Identity', 'AutoContrast', 'Equalize',
                                'Brightness', 'Sharpness', 'ShearX', 'ShearY',
                                'TranslateX', 'TranslateY', 'Rotate'
                            ]),
                    ],
                    self.patch_resize_transform.transforms[3:],
                ]))

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        if self.mode == ModeKeys.TRAIN:
            return self._build_train_sample(data)
        else:
            return self._build_infer_sample(data)

    def _build_train_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
        r"""
        Building training samples.

        step 1. Preprocess the data using the logic of `_build_infer_sample`
            and make sure the label data is in the result.
        step 2. Preprocess the label data:
            - add ` ` before the label value and add a `ref_dict` value
            - tokenize the label as the `target` value without the `bos` token
            - add the `bos` token and remove the `eos` token of `target` to
              obtain `prev_output_tokens`
            - add the constraint mask.

        Args:
            data (`Dict[str, Any]`): Input data, which should contain the keys
                `image`, `prompt` and `label`. `image` is the image input,
                `prompt` is the text input, and `label` is the supervised data
                for training.
        Return:
            A dict object, containing source, image, mask, label, target
            tokens, and previous output tokens data.
        """
        sample = self._build_infer_sample(data)
        target = ' {}'.format(sample['label'])
        sample['ref_dict'] = {sample['label']: 1.0}
        # `target` excludes bos; `prev_output_tokens` is the same sequence
        # shifted right: bos prepended, eos dropped (teacher-forcing input).
        sample['target'] = self.tokenize_text(target, add_bos=False)
        sample['prev_output_tokens'] = torch.cat(
            [self.bos_item, sample['target'][:-1]])
        if self.constraint_trie is not None:
            # Row i of the mask flags the vocabulary ids that are valid
            # continuations of the decoder prefix at step i, according to the
            # trie built over the closed label set.
            constraint_mask = torch.zeros(
                (len(sample['target']), len(self.tgt_dict))).bool()
            for i in range(len(sample['target'])):
                constraint_prefix_token = sample[
                    'prev_output_tokens'][:i + 1].tolist()
                constraint_nodes = self.constraint_trie.get_next_layer(
                    constraint_prefix_token)
                constraint_mask[i][constraint_nodes] = True
            sample['constraint_mask'] = constraint_mask
        return sample

    def _build_infer_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
        r"""
        Building inference samples.

        step 1. Get the pillow image.
        step 2. Apply transforms to the pillow image to build the image input,
            such as resize, normalize, to tensor, etc.
        step 3. Tokenize the prompt as the text input.
        step 4. Determine whether or not to add a label to the sample.

        Args:
            data (`Dict[str, Any]`): Input data, which should contain the keys
                `image` and `prompt`; the former is the image input data and
                the latter is the text input data.
        Return:
            A dict object, containing source, image, mask and label data.
        """
        image = self.get_img_pil(data[self.column_map['image']])
        patch_image = self.patch_resize_transform(image)
        # Default prompt used by OFA for image description/classification when
        # the model config does not override it.
        prompt = self.cfg.model.get('prompt',
                                    ' what does the image describe?')
        inputs = self.tokenize_text(prompt)
        sample = {
            'source': inputs,
            'patch_image': patch_image,
            'patch_mask': torch.tensor([True]),
            'decoder_prompt': self.bos_item,
        }
        if 'label' in self.column_map and self.column_map['label'] in data:
            sample['label'] = data[self.column_map['label']]
        return sample
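

# Worked sketch of the training-time tensors produced by
# `_build_train_sample` (illustrative ids only, not from the original file):
# suppose a label tokenizes to three ids [t1, t2, eos]. Then
#
#   target             = [t1, t2, eos]
#   prev_output_tokens = [bos, t1, t2]
#   constraint_mask    : bool tensor of shape (3, len(tgt_dict)); row i is
#                        True only at the ids the answer trie allows after the
#                        prefix prev_output_tokens[:i + 1], so the loss only
#                        ranks the gold token against tokens of valid labels.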