πŸ“‹ Model Description

Quantization made by Richard Erkhov.

Github

Discord

Request more models

Llama-3-8B-AEZAKMI-run1 - GGUF

  • Model creator: https://huggingface.co/adamo1139/
  • Original model: https://huggingface.co/adamo1139/Llama-3-8B-AEZAKMI-run1/

Name | Quant method | Size
Llama-3-8B-AEZAKMI-run1.Q2_K.gguf | Q2_K | 2.96GB
Llama-3-8B-AEZAKMI-run1.IQ3_XS.gguf | IQ3_XS | 3.28GB
Llama-3-8B-AEZAKMI-run1.IQ3_S.gguf | IQ3_S | 3.43GB
Llama-3-8B-AEZAKMI-run1.Q3_K_S.gguf | Q3_K_S | 3.41GB
Llama-3-8B-AEZAKMI-run1.IQ3_M.gguf | IQ3_M | 3.52GB
Llama-3-8B-AEZAKMI-run1.Q3_K.gguf | Q3_K | 3.74GB
Llama-3-8B-AEZAKMI-run1.Q3_K_M.gguf | Q3_K_M | 3.74GB
Llama-3-8B-AEZAKMI-run1.Q3_K_L.gguf | Q3_K_L | 4.03GB
Llama-3-8B-AEZAKMI-run1.IQ4_XS.gguf | IQ4_XS | 4.18GB
Llama-3-8B-AEZAKMI-run1.Q4_0.gguf | Q4_0 | 4.34GB
Llama-3-8B-AEZAKMI-run1.IQ4_NL.gguf | IQ4_NL | 4.38GB
Llama-3-8B-AEZAKMI-run1.Q4_K_S.gguf | Q4_K_S | 4.37GB
Llama-3-8B-AEZAKMI-run1.Q4_K.gguf | Q4_K | 4.58GB
Llama-3-8B-AEZAKMI-run1.Q4_K_M.gguf | Q4_K_M | 4.58GB
Llama-3-8B-AEZAKMI-run1.Q4_1.gguf | Q4_1 | 4.78GB
Llama-3-8B-AEZAKMI-run1.Q5_0.gguf | Q5_0 | 5.21GB
Llama-3-8B-AEZAKMI-run1.Q5_K_S.gguf | Q5_K_S | 5.21GB
Llama-3-8B-AEZAKMI-run1.Q5_K.gguf | Q5_K | 5.34GB
Llama-3-8B-AEZAKMI-run1.Q5_K_M.gguf | Q5_K_M | 5.34GB
Llama-3-8B-AEZAKMI-run1.Q5_1.gguf | Q5_1 | 5.65GB
Llama-3-8B-AEZAKMI-run1.Q6_K.gguf | Q6_K | 6.14GB
Llama-3-8B-AEZAKMI-run1.Q8_0.gguf | Q8_0 | 7.95GB

Original model description:



license: other
license_name: llama3
license_link: LICENSE


My first run, 8192 ctx qlora, trained on the AEZAKMI-3_6 dataset. The base model seems not to be too slopped, but the finetune is not great — lots of slopped GPTisms ("It's important to remember", etc.). It does seem uncensored though, so if you're not fine with Llama-3-8B-Instruct, this might be an option until better finetunes come out. ChatML prompt format.
Training script below. Took around 8 hours on 3090 Ti via unsloth. Benchmark prompt results can be found in my misc repo

from unsloth import FastLanguageModel
from datasets import Dataset, load_dataset
from dataclasses import dataclass, field
from typing import Dict, Optional
import torch

# Training configuration. NOTE(review): underscores were stripped from all
# identifiers by the markdown export (maxseqlength, loadin4bit, ...);
# restored here to the actual unsloth API names.
max_seq_length = 8192  # Choose any! We auto support RoPE Scaling internally!
dtype = None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "model-path-llama-3-8b",  # Choose ANY! eg mistralai/Mistral-7B-Instruct-v0.2
    max_seq_length = max_seq_length,
    attn_implementation = "flash_attention_2",
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    # token = "hf_...",  # use one if using gated models like meta-llama/Llama-2-7b-hf
)

#@title Alignment Handbook utils
import os
import re
from typing import List, Literal, Optional

from datasets import DatasetDict, concatenate_datasets, load_dataset, load_from_disk
from datasets.builder import DatasetGenerationError

#DEFAULT_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"

# ChatML template: <|im_start|>{role}\n{content}<|im_end|>\n per turn, with an
# optional trailing assistant header when add_generation_prompt is set.
tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"

from datasets import load_dataset

EOS_TOKEN = tokenizer.eos_token  # Must add EOS_TOKEN

# NOTE(review): the dataset id lost its underscore in export
# ("AEZAKMIv3-6"); "AEZAKMI_v3-6" is the most likely original — confirm
# against the adamo1139 hub page.
dataset = load_dataset("adamo1139/AEZAKMI_v3-6", split = "train")

def formatting_prompts_func(examples):
    """Render ShareGPT-style `conversations` into ChatML `text` strings.

    Each turn is wrapped as `<|im_start|>{role}\\n...<|im_end|>\\n`, and one
    EOS token is appended per whole conversation.
    """
    convos = examples["conversations"]
    texts = []
    mapper = {"system" : "<|im_start|>system\n", "human" : "<|im_start|>user\n", "gpt" : "<|im_start|>assistant\n"}
    end_mapper = {"system" : "<|im_end|>\n", "human" : "<|im_end|>\n", "gpt" : "<|im_end|>\n"}
    for convo in convos:
        text = "".join(f"{mapper[(turn := x['from'])]} {x['value']}{end_mapper[turn]}" for x in convo)
        texts.append(f"{text}{EOS_TOKEN}")  # Since there are multi-turn
        # conversations, I append the EOS_TOKEN at the end of the whole
        # conversation. These conversations always end with a gpt message.
    return { "text" : texts, }

dataset = dataset.map(formatting_prompts_func, batched = True,)

import pprint

# Spot-check a few mapped examples. (A bare no-op `pprint` expression
# statement that followed the first print has been removed.)
pprint.pprint("""NOT a formatted dataset""")
pprint.pprint(dataset[250])
pprint.pprint(dataset[260])
pprint.pprint(dataset[270])
pprint.pprint(dataset[280])
pprint.pprint(dataset[290])

Print sample:

pprint.pprint("""formatted dataset""")
pprint.pprint(dataset[250])
pprint.pprint(dataset[260])
pprint.pprint(dataset[270])
pprint.pprint(dataset[280])
pprint.pprint(dataset[290])

# Attach LoRA adapters (rank 32) to all attention and MLP projections.
# Identifiers restored from the underscore-stripped export
# (getpeftmodel, targetmodules, "qproj", ...).
model = FastLanguageModel.get_peft_model(
    model,
    r = 32,  # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 32,
    lora_dropout = 0,  # Currently only supports dropout = 0
    bias = "none",  # Currently only supports bias = "none"
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,  # We support rank stabilized LoRA
    loftq_config = None,  # And LoftQ
)

model.print_trainable_parameters()

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments
from transformers.utils import logging
from trl import SFTTrainer

# SFT run: effective batch size 2 * 4 = 8, ~1.5 epochs, cosine schedule,
# checkpoints every 150 steps (keep last 5). Keyword names restored from
# the underscore-stripped export.
sft_trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset,
    dataset_text_field = "text",
    max_seq_length = 8192,
    packing = True,
    args = TrainingArguments(
        evaluation_strategy = "no",
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        num_train_epochs = 1.5,
        warmup_steps = 10,
        learning_rate = 0.000095,
        # Use bf16 on Ampere+ hardware, fall back to fp16 otherwise.
        fp16 = not torch.cuda.is_bf16_supported(),
        bf16 = torch.cuda.is_bf16_supported(),
        logging_steps = 1,
        output_dir = "1904-llama-3-8b-aezakmi-intermediate",
        optim = "adamw_8bit",
        weight_decay = 0.0,
        lr_scheduler_type = "cosine",
        seed = 42,
        save_strategy = "steps",
        save_steps = 150,
        save_total_limit = 5,
    ),
)

sft_trainer.train()
model.save_pretrained("1904-llama-3-8b-aezakmi-final")  # Local saving

πŸ“‚ GGUF File List

πŸ“ Filename πŸ“¦ Size ⚑ Download
Llama-3-8B-AEZAKMI-run1.IQ3_M.gguf
LFS Q3
3.52 GB Download
Llama-3-8B-AEZAKMI-run1.IQ3_S.gguf
LFS Q3
3.43 GB Download
Llama-3-8B-AEZAKMI-run1.IQ3_XS.gguf
LFS Q3
3.28 GB Download
Llama-3-8B-AEZAKMI-run1.IQ4_NL.gguf
LFS Q4
4.38 GB Download
Llama-3-8B-AEZAKMI-run1.IQ4_XS.gguf
LFS Q4
4.18 GB Download
Llama-3-8B-AEZAKMI-run1.Q2_K.gguf
LFS Q2
2.96 GB Download
Llama-3-8B-AEZAKMI-run1.Q3_K.gguf
LFS Q3
3.74 GB Download
Llama-3-8B-AEZAKMI-run1.Q3_K_L.gguf
LFS Q3
4.03 GB Download
Llama-3-8B-AEZAKMI-run1.Q3_K_M.gguf
LFS Q3
3.74 GB Download
Llama-3-8B-AEZAKMI-run1.Q3_K_S.gguf
LFS Q3
3.41 GB Download
Llama-3-8B-AEZAKMI-run1.Q4_0.gguf
Recommended LFS Q4
4.34 GB Download
Llama-3-8B-AEZAKMI-run1.Q4_1.gguf
LFS Q4
4.78 GB Download
Llama-3-8B-AEZAKMI-run1.Q4_K.gguf
LFS Q4
4.58 GB Download
Llama-3-8B-AEZAKMI-run1.Q4_K_M.gguf
LFS Q4
4.58 GB Download
Llama-3-8B-AEZAKMI-run1.Q4_K_S.gguf
LFS Q4
4.37 GB Download
Llama-3-8B-AEZAKMI-run1.Q5_0.gguf
LFS Q5
5.21 GB Download
Llama-3-8B-AEZAKMI-run1.Q5_1.gguf
LFS Q5
5.65 GB Download
Llama-3-8B-AEZAKMI-run1.Q5_K.gguf
LFS Q5
5.34 GB Download
Llama-3-8B-AEZAKMI-run1.Q5_K_M.gguf
LFS Q5
5.34 GB Download
Llama-3-8B-AEZAKMI-run1.Q5_K_S.gguf
LFS Q5
5.21 GB Download
Llama-3-8B-AEZAKMI-run1.Q6_K.gguf
LFS Q6
6.14 GB Download
Llama-3-8B-AEZAKMI-run1.Q8_0.gguf
LFS Q8
7.95 GB Download