Model Description
Quantization made by Richard Erkhov.
Llama-3-8B-AEZAKMI-run1 - GGUF
- Model creator: https://huggingface.co/adamo1139/
- Original model: https://huggingface.co/adamo1139/Llama-3-8B-AEZAKMI-run1/
| Name | Quant method | Size |
|---|---|---|
| Llama-3-8B-AEZAKMI-run1.Q2_K.gguf | Q2_K | 2.96GB |
| Llama-3-8B-AEZAKMI-run1.IQ3_XS.gguf | IQ3_XS | 3.28GB |
| Llama-3-8B-AEZAKMI-run1.IQ3_S.gguf | IQ3_S | 3.43GB |
| Llama-3-8B-AEZAKMI-run1.Q3_K_S.gguf | Q3_K_S | 3.41GB |
| Llama-3-8B-AEZAKMI-run1.IQ3_M.gguf | IQ3_M | 3.52GB |
| Llama-3-8B-AEZAKMI-run1.Q3_K.gguf | Q3_K | 3.74GB |
| Llama-3-8B-AEZAKMI-run1.Q3_K_M.gguf | Q3_K_M | 3.74GB |
| Llama-3-8B-AEZAKMI-run1.Q3_K_L.gguf | Q3_K_L | 4.03GB |
| Llama-3-8B-AEZAKMI-run1.IQ4_XS.gguf | IQ4_XS | 4.18GB |
| Llama-3-8B-AEZAKMI-run1.Q4_0.gguf | Q4_0 | 4.34GB |
| Llama-3-8B-AEZAKMI-run1.IQ4_NL.gguf | IQ4_NL | 4.38GB |
| Llama-3-8B-AEZAKMI-run1.Q4_K_S.gguf | Q4_K_S | 4.37GB |
| Llama-3-8B-AEZAKMI-run1.Q4_K.gguf | Q4_K | 4.58GB |
| Llama-3-8B-AEZAKMI-run1.Q4_K_M.gguf | Q4_K_M | 4.58GB |
| Llama-3-8B-AEZAKMI-run1.Q4_1.gguf | Q4_1 | 4.78GB |
| Llama-3-8B-AEZAKMI-run1.Q5_0.gguf | Q5_0 | 5.21GB |
| Llama-3-8B-AEZAKMI-run1.Q5_K_S.gguf | Q5_K_S | 5.21GB |
| Llama-3-8B-AEZAKMI-run1.Q5_K.gguf | Q5_K | 5.34GB |
| Llama-3-8B-AEZAKMI-run1.Q5_K_M.gguf | Q5_K_M | 5.34GB |
| Llama-3-8B-AEZAKMI-run1.Q5_1.gguf | Q5_1 | 5.65GB |
| Llama-3-8B-AEZAKMI-run1.Q6_K.gguf | Q6_K | 6.14GB |
| Llama-3-8B-AEZAKMI-run1.Q8_0.gguf | Q8_0 | 7.95GB |
Original model description:
license: other
license_name: llama3
license_link: LICENSE
My first run, 8192 ctx qlora, trained on AEZAKMI-3_6 dataset. Base seems to not be too slopped but finetune is not great - lots of slopped GPTisms, "It's important to remember" etc. It does seem uncensored though, so if you're not fine with Llama-3-8B-Instruct, this might be an option until better finetunes come out. ChatML prompt format.
Training script below. Took around 8 hours on 3090 Ti via unsloth. Benchmark prompt results can be found in my misc repo
# Unsloth QLoRA training script for Llama-3-8B on the AEZAKMI dataset.
# NOTE(review): identifiers below were restored from an underscore-stripped
# copy (e.g. `loadin4bit`, `EOSTOKEN`); the garbled version raised NameError
# at `end_mapper` / `EOS_TOKEN`, which only ever existed underscore-less.
from unsloth import FastLanguageModel
from datasets import Dataset, load_dataset
from dataclasses import dataclass, field
from typing import Dict, Optional
import torch

max_seq_length = 8192  # Choose any! We auto support RoPE Scaling internally!
dtype = None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "model-path-llama-3-8b",  # Choose ANY! eg mistralai/Mistral-7B-Instruct-v0.2
    max_seq_length = max_seq_length,
    attn_implementation = "flash_attention_2",
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    # token = "hf_...",  # use one if using gated models like meta-llama/Llama-2-7b-hf
)

#@title Alignment Handbook utils
import os
import re
from typing import List, Literal, Optional
from datasets import DatasetDict, concatenate_datasets, load_dataset, load_from_disk
from datasets.builder import DatasetGenerationError

#DEFAULT_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"

# ChatML prompt format (matches the model-card description).
tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"

from datasets import load_dataset

EOS_TOKEN = tokenizer.eos_token  # Must add EOS_TOKEN

# NOTE(review): dataset id restored as AEZAKMI_v3-6 from the stripped
# "AEZAKMIv3-6" — verify against the creator's Hugging Face datasets.
dataset = load_dataset("adamo1139/AEZAKMI_v3-6", split = "train")

def formatting_prompts_func(examples):
    """Render each ShareGPT-style conversation into one ChatML `text` string."""
    convos = examples["conversations"]
    texts = []
    # Map ShareGPT roles ("system"/"human"/"gpt") to ChatML role headers.
    mapper = {"system" : "<|im_start|>system\n", "human" : "<|im_start|>user\n", "gpt" : "<|im_start|>assistant\n"}
    end_mapper = {"system" : "<|im_end|>\n", "human" : "<|im_end|>\n", "gpt" : "<|im_end|>\n"}
    for convo in convos:
        text = "".join(f"{mapper[(turn := x['from'])]} {x['value']}{end_mapper[turn]}" for x in convo)
        texts.append(f"{text}{EOS_TOKEN}")  # Since there are multi-turn
        # conversations, I append the EOS_TOKEN at the end of the whole
        # conversation. These conversations always ends with a gpt message.
    return { "text" : texts, }
pass

dataset = dataset.map(formatting_prompts_func, batched = True,)

import pprint
pprint.pprint("""NOT a formatted dataset""")
pprint.pprint(dataset[250])
pprint.pprint(dataset[260])
pprint.pprint(dataset[270])
pprint.pprint(dataset[280])
pprint.pprint(dataset[290])

# Print sample
pprint.pprint("""formatted dataset""")
pprint.pprint(dataset[250])
pprint.pprint(dataset[260])
pprint.pprint(dataset[270])
pprint.pprint(dataset[280])
pprint.pprint(dataset[290])

# Attach LoRA adapters to all attention and MLP projections.
model = FastLanguageModel.get_peft_model(
    model,
    r = 32,  # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 32,
    lora_dropout = 0,  # Currently only supports dropout = 0
    bias = "none",  # Currently only supports bias = "none"
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,  # We support rank stabilized LoRA
    loftq_config = None,  # And LoftQ
)
model.print_trainable_parameters()

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments
from transformers.utils import logging
from trl import SFTTrainer

sft_trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset,
    dataset_text_field = "text",
    max_seq_length = 8192,
    packing = True,
    args = TrainingArguments(
        evaluation_strategy = "no",
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,  # effective batch size 8
        num_train_epochs = 1.5,
        warmup_steps = 10,
        learning_rate = 0.000095,
        # Prefer bf16 on Ampere+ GPUs, fall back to fp16 otherwise.
        fp16 = not torch.cuda.is_bf16_supported(),
        bf16 = torch.cuda.is_bf16_supported(),
        logging_steps = 1,
        output_dir = "1904-llama-3-8b-aezakmi-intermediate",
        optim = "adamw_8bit",
        weight_decay = 0.0,
        lr_scheduler_type = "cosine",
        seed = 42,
        save_strategy = "steps",
        save_steps = 150,
        save_total_limit = 5,
    ),
)
sft_trainer.train()
model.save_pretrained("1904-llama-3-8b-aezakmi-final")  # Local saving
GGUF File List
| Filename | Size | Download |
|---|---|---|
|
Llama-3-8B-AEZAKMI-run1.IQ3_M.gguf
LFS
Q3
|
3.52 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.IQ3_S.gguf
LFS
Q3
|
3.43 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.IQ3_XS.gguf
LFS
Q3
|
3.28 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.IQ4_NL.gguf
LFS
Q4
|
4.38 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.IQ4_XS.gguf
LFS
Q4
|
4.18 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q2_K.gguf
LFS
Q2
|
2.96 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q3_K.gguf
LFS
Q3
|
3.74 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q3_K_L.gguf
LFS
Q3
|
4.03 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q3_K_M.gguf
LFS
Q3
|
3.74 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q3_K_S.gguf
LFS
Q3
|
3.41 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q4_0.gguf
Recommended
LFS
Q4
|
4.34 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q4_1.gguf
LFS
Q4
|
4.78 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q4_K.gguf
LFS
Q4
|
4.58 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q4_K_M.gguf
LFS
Q4
|
4.58 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q4_K_S.gguf
LFS
Q4
|
4.37 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q5_0.gguf
LFS
Q5
|
5.21 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q5_1.gguf
LFS
Q5
|
5.65 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q5_K.gguf
LFS
Q5
|
5.34 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q5_K_M.gguf
LFS
Q5
|
5.34 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q5_K_S.gguf
LFS
Q5
|
5.21 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q6_K.gguf
LFS
Q6
|
6.14 GB | Download |
|
Llama-3-8B-AEZAKMI-run1.Q8_0.gguf
LFS
Q8
|
7.95 GB | Download |