# Short GPU alias -> full marketing name as reported by the cloud provider API.
# NOTE: the original file defined every constant below twice, verbatim; the
# duplicate definitions were removed (the second copy merely rebound the same
# names to equal values and risked silent drift on edit).
GPU_TYPE_MAPPING = {
    "3090": "NVIDIA GeForce RTX 3090",
    "3090Ti": "NVIDIA GeForce RTX 3090 Ti",
    "A5000": "NVIDIA RTX A5000",
    "A6000": "NVIDIA RTX A6000",
    "4000Ada": "NVIDIA RTX 4000 Ada Generation",
}

# Supported short GPU aliases, in declaration order (dicts preserve insertion
# order in Python 3.7+).
GPU_TYPES = list(GPU_TYPE_MAPPING.keys())

# Endpoint the worker containers use to reach the local queue manager.
QUEUE_MANAGER_HOST = "http://127.0.0.1:7777"

# Default body for a pod-creation request. Callers are expected to copy and
# override fields as needed; mutating this dict in place would leak changes
# into every subsequent request.
DEFAULT_PAYLOAD = {
    "allowedCudaVersions": [],
    "cloudType": "SECURE",
    "computeType": "GPU",
    "containerDiskInGb": 50,
    "containerRegistryAuthId": "",
    "countryCodes": [""],
    "cpuFlavorPriority": "availability",
    "dataCenterPriority": "availability",
    "dockerEntrypoint": [],
    "dockerStartCmd": [],
    "env": {
        # Injected into the container so workers can reach the queue manager.
        "QUEUE_MANAGER_HOST": QUEUE_MANAGER_HOST
    },
    "gpuCount": 1,
    "gpuTypePriority": "availability",
    "interruptible": False,
    "locked": False,
    "minDiskBandwidthMBps": 500,
    "minDownloadMbps": 500,
    "minRAMPerGPU": 32,
    "minUploadMbps": 500,
    "minVCPUPerGPU": 8,
    "ports": [],
    "supportPublicIp": False
}