MODEL_MAP = {
    "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",  # [Recommended]
    "nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
    "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
    "gemma-7b": "google/gemma-1.1-7b-it",
    "openchat-3.5": "openchat/openchat-3.5-0106",
    "command-r-plus": "CohereForAI/c4ai-command-r-plus-4bit",
    "llama3-70b": "meta-llama/Meta-Llama-3-70B-Instruct",
    "zephyr-141b": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
    "default": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Qwen2-72B": "Qwen/Qwen2-72B",
}

AVAILABLE_MODELS = list(MODEL_MAP.keys())

PRO_MODELS = ["command-r-plus", "llama3-70b", "zephyr-141b", "Qwen2-72B"]
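

# A minimal sketch (not part of the original constants) of how MODEL_MAP and
# PRO_MODELS might be consumed: resolve a short model alias to its Hugging Face
# repo id, falling back to the "default" entry for unknown aliases. The function
# name and the `allow_pro` flag are assumptions for illustration.
def resolve_model(alias: str, allow_pro: bool = True) -> str:
    if alias in PRO_MODELS and not allow_pro:
        # Pro-tier models are swapped for the default when disallowed.
        alias = "default"
    return MODEL_MAP.get(alias, MODEL_MAP["default"])
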

STOP_SEQUENCES_MAP = {
    # https://huggingface.co./mistralai/Mixtral-8x7B-Instruct-v0.1/blob/main/tokenizer_config.json#L33
    "mixtral-8x7b": "</s>",
    # https://huggingface.co./NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO/blob/main/tokenizer_config.json#L50
    "nous-mixtral-8x7b": "<|im_end|>",
    # https://huggingface.co./mistralai/Mistral-7B-Instruct-v0.2/blob/main/tokenizer_config.json#L33
    "mistral-7b": "</s>",
    # https://huggingface.co./01-ai/Yi-1.5-34B-Chat/blob/main/tokenizer_config.json#L42
    "yi-1.5-34b": "<|im_end|>",
    # https://huggingface.co./google/gemma-1.1-7b-it/blob/main/tokenizer_config.json#L1509
    "gemma-7b": "<eos>",
    "openchat-3.5": "<|end_of_turn|>",
    # https://huggingface.co./CohereForAI/c4ai-command-r-plus-4bit/blob/main/tokenizer_config.json#L305
    "command-r-plus": "<|END_OF_TURN_TOKEN|>",
    # https://huggingface.co./Qwen/Qwen2-72B-Instruct/blob/main/tokenizer_config.json#L30
    "Qwen2-72B": "<|im_end|>",
}
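

# A minimal sketch (not in the original module) of how STOP_SEQUENCES_MAP might
# be consumed when calling a text-generation backend: models without an entry
# (e.g. "llama3-70b") simply get no extra stop sequence. The helper name is an
# assumption for illustration.
def stop_sequences_for(model: str) -> list:
    stop = STOP_SEQUENCES_MAP.get(model)
    return [stop] if stop else []
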

TOKEN_LIMIT_MAP = {
    "mixtral-8x7b": 32768,
    "nous-mixtral-8x7b": 32768,
    "mistral-7b": 32768,
    "yi-1.5-34b": 4096,
    "gemma-7b": 8192,
    "openchat-3.5": 8192,
    "command-r-plus": 32768,
    "llama3-70b": 8192,
    "zephyr-141b": 2048,
    "gpt-3.5-turbo": 8192,
    "Qwen2-72B": 32768
}

TOKEN_RESERVED = 20
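

# A hedged sketch of how TOKEN_LIMIT_MAP and TOKEN_RESERVED might be combined
# to budget a completion; the helper and its 32768 fallback for unlisted
# models are assumptions, not part of the original module.
def max_new_tokens(model: str, prompt_tokens: int) -> int:
    context_limit = TOKEN_LIMIT_MAP.get(model, 32768)
    # Reserve a small margin for special tokens, spend the rest on generation.
    return max(context_limit - TOKEN_RESERVED - prompt_tokens, 0)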


# https://platform.openai.com/docs/api-reference/models/list
AVAILABLE_MODELS_DICTS = [
    {
        "id": "mixtral-8x7b",
        "description": "[mistralai/Mixtral-8x7B-Instruct-v0.1]: https://huggingface.co./mistralai/Mixtral-8x7B-Instruct-v0.1",
        "object": "model",
        "created": 1700000000,
        "owned_by": "mistralai",
    },
    {
        "id": "nous-mixtral-8x7b",
        "description": "[NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO]: https://huggingface.co./NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
        "object": "model",
        "created": 1700000000,
        "owned_by": "NousResearch",
    },
    {
        "id": "mistral-7b",
        "description": "[mistralai/Mistral-7B-Instruct-v0.2]: https://huggingface.co./mistralai/Mistral-7B-Instruct-v0.2",
        "object": "model",
        "created": 1700000000,
        "owned_by": "mistralai",
    },
    {
        "id": "yi-1.5-34b",
        "description": "[01-ai/Yi-1.5-34B-Chat]: https://huggingface.co./01-ai/Yi-1.5-34B-Chat",
        "object": "model",
        "created": 1700000000,
        "owned_by": "01-ai",
    },
    {
        "id": "gemma-7b",
        "description": "[google/gemma-1.1-7b-it]: https://huggingface.co./google/gemma-1.1-7b-it",
        "object": "model",
        "created": 1700000000,
        "owned_by": "Google",
    },
    {
        "id": "openchat-3.5",
        "description": "[openchat/openchat-3.5-0106]: https://huggingface.co./openchat/openchat-3.5-0106",
        "object": "model",
        "created": 1700000000,
        "owned_by": "openchat"
    },
    {
        "id": "command-r-plus",
        "description": "[CohereForAI/c4ai-command-r-plus]: https://huggingface.co./CohereForAI/c4ai-command-r-plus",
        "object": "model",
        "created": 1700000000,
        "owned_by": "CohereForAI"
    },
    {
        "id": "llama3-70b",
        "description": "[meta-llama/Meta-Llama-3-70B]: https://huggingface.co./meta-llama/Meta-Llama-3-70B",
        "object": "model",
        "created": 1700000000,
        "owned_by": "Meta"
    },
    {
        "id": "zephyr-141b",
        "description": "[HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1]: https://huggingface.co./HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
        "object": "model",
        "created": 1700000000,
        "owned_by": "Huggingface"
    },
    {
        "id": "gpt-3.5-turbo",
        "description": "[openai/gpt-3.5-turbo]: https://platform.openai.com/docs/models/gpt-3-5-turbo",
        "object": "model",
        "created": 1700000000,
        "owned_by": "OpenAI"
    },
    {
        "id": "Qwen2-72B",
        "description": "[Qwen/Qwen2-72B]: https://huggingface.co./Qwen/Qwen2-72B",
        "object": "model",
        "created": 1700000000,
        "owned_by": "Qwen"
    }
]
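

# Illustrative only: an OpenAI-style "list models" payload built from
# AVAILABLE_MODELS_DICTS, matching the response shape documented at
# https://platform.openai.com/docs/api-reference/models/list. The function
# name is an assumption, not part of the original module.
def models_list_response() -> dict:
    return {"object": "list", "data": AVAILABLE_MODELS_DICTS}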