"OutOfResources: out of resource: shared memory, Required: 180224, Hardware limit: 101376. "

#27
by joker26

I'm getting the following error when running the model; the details are as follows:
Traceback (most recent call last):
File "/hy-tmp/RAG/app/test_RAG.py", line 154, in <module>
main()
File "/hy-tmp/RAG/app/test_RAG.py", line 123, in main
answer = llm.predict('\n'.join(rerank_res), query).strip()
File "/hy-tmp/RAG/app/llm_infer.py", line 284, in predict
response = self.phi_infer_test(context)
File "/hy-tmp/RAG/app/llm_infer.py", line 229, in phi_infer_test
generated_ids = self.model.generate(
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/transformers/generation/utils.py", line 1969, in generate
result = self._sample(
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/transformers/generation/utils.py", line 2912, in _sample
outputs = self(**model_inputs, return_dict=True)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/Phi-3-small-128k-instruct/modeling_phi3_small.py", line 956, in forward
outputs = self.model(
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/Phi-3-small-128k-instruct/modeling_phi3_small.py", line 859, in forward
layer_outputs = decoder_layer(
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/Phi-3-small-128k-instruct/modeling_phi3_small.py", line 671, in forward
hidden_states, self_attn_weights, present_key_values = self.self_attn(
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/Phi-3-small-128k-instruct/modeling_phi3_small.py", line 616, in forward
attn_function_output = self._apply_blocksparse_attention(
File "/root/.cache/huggingface/modules/transformers_modules/Phi-3-small-128k-instruct/modeling_phi3_small.py", line 382, in _apply_blocksparse_attention
context_layer = self._blocksparse_layer(
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/Phi-3-small-128k-instruct/triton_blocksparse_attention_layer.py", line 165, in forward
return blocksparse_flash_attn_padded_fwd(
File "/root/.cache/huggingface/modules/transformers_modules/Phi-3-small-128k-instruct/triton_flash_blocksparse_attn.py", line 994, in blocksparse_flash_attn_padded_fwd
_fwd_kernel_batch_inference[grid](
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/triton/runtime/jit.py", line 167, in
return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/triton/runtime/autotuner.py", line 305, in run
return self.fn.run(*args, **kwargs)
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/triton/runtime/jit.py", line 425, in run
kernel.run(grid_0, grid_1, grid_2, kernel.num_warps, kernel.num_ctas, # number of warps/ctas per instance
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/triton/compiler/compiler.py", line 255, in getattribute
self._init_handles()
File "/usr/local/miniconda3/envs/myconda/lib/python3.9/site-packages/triton/compiler/compiler.py", line 248, in _init_handles
raise OutOfResources(self.shared, max_shared, "shared memory")
triton.runtime.autotuner.OutOfResources: out of resource: shared memory, Required: 180224, Hardware limit: 101376. Reducing block sizes or num_stages may help.

I'm hitting this on both my RTX 4090 and my RTX 3090.
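
For reference, the 101376-byte hardware limit in the message is exactly the 99 KB per-block dynamic shared memory cap that the CUDA programming guide documents for compute capability 8.6 (RTX 3090) and 8.9 (RTX 4090), while the blocksparse attention kernel here asks for 180224 bytes (176 KB). The snippet below is only a rough sketch to confirm the mismatch on a given machine; the SHARED_MEM_PER_BLOCK table is my own lookup of the documented limits and the 180224 value is copied from the error above, neither comes from the model code.

import torch

# Opt-in maximum dynamic shared memory per block, in bytes, as documented
# in the CUDA programming guide for each compute capability.
SHARED_MEM_PER_BLOCK = {
    (8, 0): 166912,   # A100:      163 KB
    (8, 6): 101376,   # RTX 3090:   99 KB
    (8, 9): 101376,   # RTX 4090:   99 KB
    (9, 0): 232448,   # H100:      227 KB
}

cc = torch.cuda.get_device_capability(0)          # e.g. (8, 9) on an RTX 4090
limit = SHARED_MEM_PER_BLOCK.get(cc)
required = 180224                                  # taken from the OutOfResources message above

print(f"compute capability: {cc}, per-block shared memory limit: {limit}, kernel needs: {required}")
if limit is not None and required > limit:
    print("This kernel configuration cannot launch on this GPU.")

As the error itself suggests, fitting under the 99 KB cap would mean smaller Triton block sizes or fewer pipeline stages (num_stages) in the blocksparse kernel configuration, or running on hardware with a larger shared-memory limit.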
