# hfsearch/hfconstants.py
# Constant tables shared by the hfsearch app: dataset size-category labels,
# Hugging Face Space hardware and runtime-stage names, pipeline tags, and
# emoji mappings used when rendering search results.
# Dataset size-category labels ("n<1K", "1K<n<10K", ..., "n>1T"), smallest to
# largest. Built from the unit ladder so adjacent buckets always share a bound.
_SIZE_UNITS = ["1K", "10K", "100K", "1M", "10M", "100M", "1B", "10B", "100B", "1T"]
DS_SIZE_CATEGORIES = (
    ["n<1K"]
    + [f"{lo}<n<{hi}" for lo, hi in zip(_SIZE_UNITS, _SIZE_UNITS[1:])]
    + ["n>1T"]
)
SPACE_HARDWARES = ["cpu-basic", "zero-a10g", "cpu-upgrade", "t4-small", "l4x1", "a10g-large", "l40sx1", "a10g-small", "t4-medium", "cpu-xl", "a100-large"]
SPACE_STAGES = ["RUNNING", "SLEEPING", "RUNTIME_ERROR", "PAUSED", "BUILD_ERROR", "CONFIG_ERROR", "BUILDING", "APP_STARTING", "RUNNING_APP_STARTING"]
# Emoji badge for each non-running Space stage: πŸ’€ = stopped/failed,
# ⏸️ = paused, 🚧 = building or starting up.
# NOTE(review): "RUNNING" is deliberately absent — presumably running Spaces
# are rendered without a badge; confirm against the consumer of this table.
SPACE_STAGES_EMOJI = dict(
    SLEEPING="πŸ’€",
    RUNTIME_ERROR="πŸ’€",
    PAUSED="⏸️",
    BUILD_ERROR="πŸ’€",
    CONFIG_ERROR="πŸ’€",
    BUILDING="🚧",
    APP_STARTING="🚧",
    RUNNING_APP_STARTING="🚧",
)
# Hub pipeline tags (39 entries). Order kept exactly as the original list —
# presumably a popularity/display ranking; do not sort.
PIPELINES = (
    "text-generation text-to-image image-text-to-text fill-mask "
    "text-classification sentence-similarity automatic-speech-recognition "
    "feature-extraction text-to-speech text2text-generation image-to-image "
    "text-to-video zero-shot-image-classification image-classification "
    "image-to-video image-to-text token-classification translation "
    "time-series-forecasting audio-classification object-detection "
    "audio-text-to-text zero-shot-classification text-to-audio "
    "image-segmentation image-feature-extraction video-text-to-text "
    "image-to-3d any-to-any question-answering text-to-3d "
    "zero-shot-object-detection summarization document-question-answering "
    "visual-question-answering depth-estimation voice-activity-detection "
    "audio-to-audio video-classification"
).split()
# Emoji decorations keyed by category ("Pipeline" tag or "SDK" name).
# Pipelines without an entry here simply get no emoji.
_PIPELINE_EMOJIS = {
    "text-to-image": "🎨",
    "image-to-image": "πŸ–ΌοΈ",
    "text-to-speech": "πŸ”Š",
    "automatic-speech-recognition": "🎀",
    "text-to-audio": "🎡",
    "audio-text-to-text": "πŸŽ™οΈ",
    "image-to-video": "🎞️",
    "text-to-video": "🎬",
    "text-generation": "πŸ€–",
    "text2text-generation": "πŸ€–",
}
EMOJIS = {
    "Pipeline": _PIPELINE_EMOJIS,
    "SDK": {"docker": "🐳"},
}