2 changes: 1 addition & 1 deletion .gitignore
@@ -4,4 +4,4 @@ __pycache__
ChromaDB
models
.vscode
-hal_baseline*/
+hal_baseline_*/
63 changes: 63 additions & 0 deletions script_mteb_french/quality_checks/hal/hal_baseline/scores.json
@@ -0,0 +1,63 @@
[
{
"score": {
"eval_loss": 1.0339970588684082,
"eval_f1": 0.6143314762314238,
"eval_runtime": 54.7352,
"eval_samples_per_second": 143.783,
"eval_steps_per_second": 8.989,
"epoch": 5.0
},
"seed": 42
},{
"score": {
"eval_loss": 1.0847108364105225,
"eval_f1": 0.6004042539017442,
"eval_runtime": 54.9377,
"eval_samples_per_second": 143.253,
"eval_steps_per_second": 8.956,
"epoch": 5.0
},
"seed": 2024
},{
"score": {
"eval_loss": 1.0931035280227661,
"eval_f1": 0.5940237137849953,
"eval_runtime": 54.5868,
"eval_samples_per_second": 144.174,
"eval_steps_per_second": 9.013,
"epoch": 5.0
},
"seed": 5050
}
]{
"score": {
"eval_loss": 1.083167552947998,
"eval_f1": 0.609011590210188,
"eval_runtime": 54.7637,
"eval_samples_per_second": 143.708,
"eval_steps_per_second": 8.984,
"epoch": 5.0
},
"seed": 42
}{
"score": {
"eval_loss": 1.0764442682266235,
"eval_f1": 0.6106352447263774,
"eval_runtime": 54.7671,
"eval_samples_per_second": 143.699,
"eval_steps_per_second": 8.983,
"epoch": 5.0
},
"seed": 42
}{
"score": {
"eval_loss": 1.08128821849823,
"eval_f1": 0.6074891108370322,
"eval_runtime": 54.5086,
"eval_samples_per_second": 144.381,
"eval_steps_per_second": 9.026,
"epoch": 5.0
},
"seed": 42
}
4 changes: 2 additions & 2 deletions script_mteb_french/quality_checks/hal/hal_baseline_bert.py
@@ -81,10 +81,10 @@ def compute_metrics(eval_pred):
if not os.path.exists(args.model_dir):
    os.makedirs(args.model_dir)

-with open(f"{args.output_dir}/scores.json", "w") as f:
+with open(f"{args.output_dir}/scores.json", "a+") as f:
    json.dump({
        "score": score,
-        "seed": args.model_seed,
+        "seed": args.dataset_seed,
    }, f, indent=2)

trainer.save_model(f"{args.model_dir}_{args.dataset_seed}")
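Opening scores.json in "a+" mode means each seeded run appends another JSON document instead of overwriting the file, which is why the scores.json shown above ends with bare objects after the closing ] and is not one valid JSON document. A minimal reader sketch (not part of the diff); the file path and the iter_json_documents helper name are illustrative:

import json

def iter_json_documents(path):
    # Yield each JSON document found back-to-back in the appended file.
    decoder = json.JSONDecoder()
    with open(path) as f:
        text = f.read()
    idx = 0
    while idx < len(text):
        # raw_decode does not skip leading whitespace, so do it manually.
        while idx < len(text) and text[idx].isspace():
            idx += 1
        if idx == len(text):
            break
        document, idx = decoder.raw_decode(text, idx)
        yield document

for document in iter_json_documents("hal_baseline/scores.json"):
    print(document)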
28 changes: 19 additions & 9 deletions script_mteb_french/quality_checks/hal/hal_baseline_lda.py
@@ -1,6 +1,8 @@
from datasets import load_dataset

import spacy
import nltk
import spacy.cli
nltk.download('stopwords')
from nltk.corpus import stopwords

@@ -10,24 +12,32 @@

DATASET = "lyon-nlp/clustering-hal-s2s"
SEED = 42
-STOPWORDS = stopwords.words("french") + stopwords.words("english")
+STOPWORDS = stopwords.words("french") # + stopwords.words("english")

try:
    nlp = spacy.load('fr_core_news_sm', disable=['parser', 'ner'])
except OSError:
    # spacy.load raises a plain OSError when the model is missing;
    # catching FileNotFoundError would miss it. Load the same model we download.
    spacy.cli.download("fr_core_news_sm")
    nlp = spacy.load('fr_core_news_sm', disable=['parser', 'ner'])


dataset = load_dataset(DATASET, name="mteb_eval", split="test")
dataset = dataset.class_encode_column("domain")
num_classes = dataset.features["domain"].num_classes
id2label = {k: dataset.features["domain"].int2str(k) for k in range(num_classes)}
-dataset = dataset.train_test_split(test_size=0.3, shuffle=True, stratify_by_column="domain", seed=SEED)

-X_train, y_train = dataset["train"]["title"], dataset["train"]["domain"]
-X_test, y_test = dataset["test"]["title"], dataset["test"]["domain"]
+texts, domains = dataset["title"], dataset["domain"]

docs = nlp.pipe(texts)

def tokenize_text(doc):
    # Filter on the token text: a raw spaCy Token never equals a string,
    # so `token not in STOPWORDS` would keep every token.
    return [token.lemma_.lower() for token in doc if token.text.lower() not in STOPWORDS]

-tokenized_X_train = [text.split() for text in X_train]
-tokenized_X_test = [text.split() for text in X_test]
+tokenized_texts = [tokenize_text(doc) for doc in docs]
+print(tokenized_texts[:5])

-common_dictionary = Dictionary(tokenized_X_train)
-common_corpus = [common_dictionary.doc2bow(text) for text in tokenized_X_train]
+common_dictionary = Dictionary(tokenized_texts)
+common_corpus = [common_dictionary.doc2bow(text) for text in tokenized_texts]

lda = LdaModel(
    common_corpus,
@@ -38,6 +48,6 @@
print(f"Perplexity: {lda.log_perplexity(common_corpus)}")

coherence_lda = CoherenceModel(
-    model=lda, texts=tokenized_X_train,
+    model=lda, texts=tokenized_texts,
    dictionary=common_dictionary, coherence='c_v')
print(f"Coherence: {coherence_lda.get_coherence()}")
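For context, a self-contained sketch of the gensim pipeline this script builds; the toy documents and the num_topics and passes values are assumptions, since the folded middle of the diff hides the actual LdaModel arguments:

from gensim.corpora import Dictionary
from gensim.models import CoherenceModel, LdaModel

# Toy stand-ins for the lemmatized HAL titles produced by tokenize_text.
tokenized_texts = [
    ["analyse", "statistique", "corpus"],
    ["apprentissage", "automatique", "image"],
    ["modele", "statistique", "texte"],
]

common_dictionary = Dictionary(tokenized_texts)
common_corpus = [common_dictionary.doc2bow(text) for text in tokenized_texts]

# num_topics and passes are assumed values, not taken from the PR.
lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=2, passes=5)
print(f"Perplexity: {lda.log_perplexity(common_corpus)}")

coherence_lda = CoherenceModel(
    model=lda, texts=tokenized_texts,
    dictionary=common_dictionary, coherence='c_v')
print(f"Coherence: {coherence_lda.get_coherence()}")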
7 changes: 1 addition & 6 deletions script_mteb_french/quality_checks/hal/hal_baseline_tfidf.py
@@ -30,16 +30,11 @@ def main(args):
X_train, y_train = dataset["train"]["title"], dataset["train"]["domain"]
X_test, y_test = dataset["test"]["title"], dataset["test"]["domain"]

-estimators = [LogisticRegression(max_iter=1000), RandomForestClassifier(), SVC()]
+estimators = [LogisticRegression(max_iter=1000), SVC()]
parameters = [
    {
        "estimator__C": [0.1, 1, 10],
    },
-    {
-        "estimator__n_estimators": [200, 300],
-        "estimator__bootstrap": [True, False],
-        "estimator__class_weight": ["balanced", "balanced_subsample"],
-    },
    {
        "estimator__C": [0.1, 1, 10],
        "estimator__kernel": ["linear", "poly", "rbf", "sigmoid"],
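The "tfidf__" and "estimator__" prefixes in these grids imply a scikit-learn Pipeline with steps named "tfidf" and "estimator", tuned with GridSearchCV; the best-params JSON files further down look like dumps of grid.best_params_. A hedged sketch of that setup (step names come from the parameter prefixes; the scoring, cv, and toy data are assumptions):

import json

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

pipeline = Pipeline([
    ("tfidf", TfidfVectorizer()),
    ("estimator", LogisticRegression(max_iter=1000)),
])
param_grid = {
    "tfidf__max_features": [10000],
    "estimator__C": [0.1, 1, 10],
}

# Toy titles and labels standing in for the HAL train split.
X_train = [
    "analyse statistique de corpus", "modeles de langue pour le texte",
    "reseaux de neurones profonds", "apprentissage par renforcement",
    "synthese de molecules organiques", "catalyse et chimie verte",
]
y_train = [0, 0, 1, 1, 2, 2]

grid = GridSearchCV(pipeline, param_grid, scoring="f1_weighted", cv=2)
grid.fit(X_train, y_train)

# Presumably how the best-params JSON files below are produced.
print(json.dumps(grid.best_params_, indent=2))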
3 changes: 3 additions & 0 deletions script_mteb_french/quality_checks/hal/requirements.txt
@@ -0,0 +1,3 @@
langdetect>=1.0.9
gensim>=4.3.2
spacy>=3.7.5
6 changes: 1 addition & 5 deletions script_mteb_french/quality_checks/hal/run_bert.sh
@@ -4,11 +4,7 @@ seeds=(
5050
)

-cd script_mteb_french/quality_checks/hal
-
for seed in "${seeds[@]}";
do
-    python hal_baseline_bert.py --dataset_seed $seed
+    python hal_baseline_bert.py --dataset_seed $seed --epochs 5 --batch_size 32 --lr 1e-4
done
-
-cd ../../..
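The new flags imply that hal_baseline_bert.py exposes matching command-line arguments. A hedged sketch of the parser it presumably defines, inferred from the flags above and the args.* attributes used in the Python diff; the defaults are assumptions:

import argparse

parser = argparse.ArgumentParser(description="BERT baseline on the HAL dataset")
parser.add_argument("--dataset_seed", type=int, default=42)
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--output_dir", default="hal_baseline")    # assumed default
parser.add_argument("--model_dir", default="models/hal_bert")  # assumed default
args = parser.parse_args()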
1 change: 1 addition & 0 deletions script_mteb_french/quality_checks/hal/run_lda.sh
@@ -0,0 +1 @@
python hal_baseline_lda.py
4 changes: 0 additions & 4 deletions script_mteb_french/quality_checks/hal/run_tfidf.sh
@@ -4,11 +4,7 @@ seeds=(
5050
)

-cd script_mteb_french/quality_checks/hal
-
for seed in "${seeds[@]}";
do
    python hal_baseline_tfidf.py --dataset_seed $seed
done
-
-cd ../../..
@@ -0,0 +1,4 @@
{
"estimator__C": 10,
"tfidf__max_features": 10000
}
@@ -1,4 +1,4 @@
{
"estimator__C": 10,
"estimator__C": 1,
"tfidf__max_features": 10000
}
@@ -0,0 +1,4 @@
{
"estimator__C": 10,
"tfidf__max_features": 10000
}

This file was deleted.

@@ -0,0 +1,6 @@
{
"estimator__C": 1,
"estimator__class_weight": null,
"estimator__kernel": "linear",
"tfidf__max_features": 10000
}
@@ -0,0 +1,6 @@
{
"estimator__C": 1,
"estimator__class_weight": null,
"estimator__kernel": "linear",
"tfidf__max_features": 10000
}
@@ -0,0 +1,6 @@
{
"estimator__C": 1,
"estimator__class_weight": "balanced",
"estimator__kernel": "rbf",
"tfidf__max_features": 10000
}
@@ -0,0 +1,75 @@
{
"chim": {
"precision": 0.54375,
"recall": 0.39545454545454545,
"f1-score": 0.45789473684210524,
"support": 220.0
},
"info": {
"precision": 0.5698427382053654,
"recall": 0.6292134831460674,
"f1-score": 0.5980582524271845,
"support": 979.0
},
"math": {
"precision": 0.5304878048780488,
"recall": 0.3522267206477733,
"f1-score": 0.4233576642335766,
"support": 247.0
},
"phys": {
"precision": 0.4656188605108055,
"recall": 0.4100346020761246,
"f1-score": 0.43606255749770007,
"support": 578.0
},
"scco": {
"precision": 0.3493975903614458,
"recall": 0.15591397849462366,
"f1-score": 0.21561338289962825,
"support": 186.0
},
"sde": {
"precision": 0.5275779376498801,
"recall": 0.5326876513317191,
"f1-score": 0.5301204819277109,
"support": 826.0
},
"sdu": {
"precision": 0.5843621399176955,
"recall": 0.40804597701149425,
"f1-score": 0.4805414551607445,
"support": 348.0
},
"sdv": {
"precision": 0.6847672778561354,
"recall": 0.6738376127689105,
"f1-score": 0.6792584819867087,
"support": 1441.0
},
"shs": {
"precision": 0.7087294727744166,
"recall": 0.8159203980099502,
"f1-score": 0.7585568917668826,
"support": 2010.0
},
"spi": {
"precision": 0.5385338345864662,
"recall": 0.553623188405797,
"f1-score": 0.5459742734635541,
"support": 1035.0
},
"accuracy": 0.6127064803049556,
"macro avg": {
"precision": 0.5503067656740259,
"recall": 0.49269581573470056,
"f1-score": 0.5125438178205795,
"support": 7870.0
},
"weighted avg": {
"precision": 0.6036166441353017,
"recall": 0.6127064803049556,
"f1-score": 0.6044035075282668,
"support": 7870.0
}
}
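This per-class report has the shape of scikit-learn's classification_report with output_dict=True, serialized to JSON. A minimal sketch of producing one (the labels and predictions here are illustrative, not the PR's data):

import json

from sklearn.metrics import classification_report

y_true = ["shs", "sdv", "info", "shs", "phys", "sdv"]
y_pred = ["shs", "sdv", "shs", "shs", "phys", "info"]

# output_dict=True returns the nested precision/recall/f1/support structure above.
report = classification_report(y_true, y_pred, output_dict=True, zero_division=0)
with open("classification_report.json", "w") as f:
    json.dump(report, f, indent=2)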