#!/usr/bin/env python3
"""Check if PaddlePaddle has GPU support."""

import paddle
import sys

def check_paddle_gpu():
    """Report whether the installed PaddlePaddle build can use a GPU.

    Prints the Paddle version, CUDA/ROCm compile-time support, the number
    and names of visible CUDA devices, and performs a runtime smoke test
    by creating a small tensor on GPU 0.

    Returns:
        bool: True if Paddle is compiled with CUDA and the GPU smoke test
        succeeded, False otherwise.
    """
    print("Checking PaddlePaddle GPU support...")
    print(f"PaddlePaddle version: {paddle.__version__}")

    gpu_ok = False

    # Check if GPU is available
    if paddle.device.is_compiled_with_cuda():
        print("✓ PaddlePaddle compiled with CUDA support")

        # Check available GPU devices
        gpu_count = paddle.device.cuda.device_count()
        print(f"✓ Number of available GPUs: {gpu_count}")

        for i in range(gpu_count):
            try:
                device_name = paddle.device.cuda.get_device_name(i)
                print(f"  GPU {i}: {device_name}")
            except Exception as e:
                print(f"  GPU {i}: Unable to get device name - {e}")

        # Test GPU usage.
        # BUG FIX: the original used `with paddle.device.cuda.device(0):`,
        # which is a PyTorch idiom (torch.cuda.device); PaddlePaddle has no
        # such context manager, so the test always failed with an
        # AttributeError even on working GPUs. Paddle selects the device
        # via paddle.device.set_device().
        previous_device = paddle.device.get_device()
        try:
            paddle.device.set_device("gpu:0")
            # Create a simple tensor on GPU
            x = paddle.to_tensor([1.0, 2.0, 3.0])
            print(f"✓ Successfully created tensor on GPU: {x}")
            print(f"  Tensor device: {x.place}")
            gpu_ok = True
        except Exception as e:
            print(f"✗ Failed to use GPU: {e}")
        finally:
            # Restore whatever device was active before the smoke test so
            # the check has no lasting side effect on the process.
            paddle.device.set_device(previous_device)
    else:
        print("✗ PaddlePaddle not compiled with CUDA support (CPU only)")

    # Check other GPU backends (ROCm/AMD is a separate compile-time option)
    if paddle.device.is_compiled_with_rocm():
        print("✓ PaddlePaddle compiled with ROCm support")
    else:
        print("✗ PaddlePaddle not compiled with ROCm support")

    # Check current device
    current_device = paddle.device.get_device()
    print(f"Current device: {current_device}")
    return gpu_ok


def main() -> None:
    """Entry point: run the PaddlePaddle GPU diagnostic."""
    check_paddle_gpu()


if __name__ == "__main__":
    main()