gistfile1.txt (file created, 56 lines)
(segmentation) PS H:\workspaces\segmentation_api> python main.py --image tests/images/cat_3.jpg
preprocessor_config.json: 100%|███████████████████████████████████████████████████████████████| 406/406 [00:00<?, ?B/s]
C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\huggingface_hub\file_download.py:143: UserWarning: `huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in C:\Users\junyu\.cache\huggingface\hub\models--google--deeplabv3_mobilenet_v2_1.0_513. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.
To support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator. In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development
  warnings.warn(message)
Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
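
The two warnings above are unrelated to the failure further down: the first is the standard huggingface_hub notice about missing symlink support on Windows, the second only says the checkpoint ships a slow image processor. If they are noisy, both can be handled at load time. A minimal sketch, assuming the processor is loaded with AutoImageProcessor.from_pretrained (the project's actual loading code in models/huggingface_sementic_segmentation_model.py is not shown in this log) and assuming a fast processor variant is available for this checkpoint:

import os

# Silence the Windows symlink warning named above; must be set before
# anything imports huggingface_hub.
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"

from transformers import AutoImageProcessor

# Opt into the fast processor explicitly instead of waiting for the
# v4.52 default change mentioned in the warning.
processor = AutoImageProcessor.from_pretrained(
    "google/deeplabv3_mobilenet_v2_1.0_513",
    use_fast=True,
)
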
config.json: 1.38kB [00:00, ?B/s]
pytorch_model.bin: 100%|██████████████████████████████████████████████████████████| 10.4M/10.4M [00:00<00:00, 15.8MB/s]
2025-10-22 14:25:33,617 - models.huggingface_sementic_segmentation_model - INFO - Loading google/deeplabv3_mobilenet_v2_1.0_513 ...
2025-10-22 14:25:33,810 - models.huggingface_sementic_segmentation_model - INFO - google/deeplabv3_mobilenet_v2_1.0_513 model loaded successfully!
2025-10-22 14:25:33,810 - __main__ - INFO - Processing tests/images/cat_3.jpg
2025-10-22 14:25:33,810 - utils.utils - INFO - Loading image from tests/images/cat_3.jpg
2025-10-22 14:25:33,975 - inference.segmenter - ERROR - Segmentation failed for tests/images/cat_3.jpg: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor
Traceback (most recent call last):
  File "H:\workspaces\segmentation_api\main.py", line 58, in <module>
    main()
  File "H:\workspaces\segmentation_api\main.py", line 52, in main
    results = segmenter.segment(args.image, target_class_ids=args.target_ids, threshold=args.threshold)
  File "H:\workspaces\segmentation_api\inference\segmenter.py", line 50, in segment
    results = self.model.predict(image, target_class_ids, threshold)
  File "H:\workspaces\segmentation_api\models\huggingface_sementic_segmentation_model.py", line 61, in predict
    outputs = self.model(**inputs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\transformers\models\mobilenet_v2\modeling_mobilenet_v2.py", line 746, in forward
    outputs = self.mobilenet_v2(
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\transformers\models\mobilenet_v2\modeling_mobilenet_v2.py", line 527, in forward
    hidden_states = self.conv_stem(pixel_values)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\transformers\models\mobilenet_v2\modeling_mobilenet_v2.py", line 415, in forward
    features = self.first_conv(features)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\transformers\models\mobilenet_v2\modeling_mobilenet_v2.py", line 321, in forward
    features = self.convolution(features)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\conv.py", line 548, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "C:\Users\junyu\anaconda3\envs\segmentation\lib\site-packages\torch\nn\modules\conv.py", line 543, in _conv_forward
    return F.conv2d(
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor
model.safetensors: 100%|██████████████████████████████████████████████████████████| 10.3M/10.3M [00:00<00:00, 36.9MB/s]
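
The RuntimeError is a device mismatch: the model weights were moved to CUDA (torch.cuda.FloatTensor), but the pixel_values produced by the image processor were left on the CPU (torch.FloatTensor), so the first convolution in the MobileNetV2 stem fails. The usual fix is to move the processed inputs onto the same device as the model before calling it. Below is a minimal standalone sketch of the failing path plus that fix, assuming the checkpoint is loaded through the transformers Auto classes; the project's real wrapper in models/huggingface_sementic_segmentation_model.py is not shown in this log, so class and variable names here are illustrative.

import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

model_id = "google/deeplabv3_mobilenet_v2_1.0_513"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForSemanticSegmentation.from_pretrained(model_id).to(device)
model.eval()

image = Image.open("tests/images/cat_3.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

# The fix: without this .to(device), pixel_values stays a CPU FloatTensor
# while the conv weights are torch.cuda.FloatTensor, which is exactly the
# RuntimeError raised in the traceback above.
inputs = {k: v.to(device) for k, v in inputs.items()}

with torch.no_grad():
    outputs = model(**inputs)

print(outputs.logits.shape)  # per-class logits at reduced resolution

Inside the project's own predict method, the same one-line move can likely be applied to the dict returned by the processor, using next(self.model.parameters()).device to discover where the weights live.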