# ad-llama

```json
{
  "modelSpec": {
    "modelWeightsConfigUrl": "https://huggingface.co/mlc-ai/Llama-2-7b-chat-hf-q4f16_1-MLC/resolve/main/",
    "modelLibWasmUrl": "https://raw.githubusercontent.com/mlc-ai/binary-mlc-llm-libs/main/Llama-2-7b-chat-hf/Llama-2-7b-chat-hf-q4f16_1-ctx4k_cs1k-webgpu.wasm"
  },
  "targetDevice": "gpu",
  "loadModelConfig": "waiting"
}
```