import json

from datasets import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForTokenClassification,
    TrainingArguments,
    Trainer,
    DataCollatorForTokenClassification,
)


# Load the data
def load_data(file_path):
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return data
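
# Assumed record layout (inferred from how the fields are used below; the real
# files may differ):
#   {"text":   ["2015年", "清华大学", ...],                    # pre-split words
#    "labels": [{"label": "TIME"}, {"label": "SCHOOL"}, ...]}  # one dict per word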


# Preprocessing: tokenize and align word-level labels with the sub-word tokens
def tokenize_and_align_labels(examples, tokenizer):
    # No padding here: DataCollatorForTokenClassification pads each batch
    # dynamically at training time
    tokenized_inputs = tokenizer(
        examples["text"], truncation=True, is_split_into_words=True
    )
    labels = []
    for i, label in enumerate(examples["labels"]):
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        label_ids = []
        for word_idx in word_ids:
            if word_idx is None:
                label_ids.append(-100)  # special tokens; -100 is ignored by the loss
            else:
                label_name = label[word_idx]["label"]
                label_id = label2id[label_name]  # map the label name to its ID
                label_ids.append(label_id)
        labels.append(label_ids)
    tokenized_inputs["labels"] = labels
    return tokenized_inputs
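
# Illustration (hypothetical): bert-base-chinese tokenizes per character, so the
# words ["清华大学", "计算机"] yield word_ids like [None, 0, 0, 0, 0, 1, 1, 1, None];
# each sub-token inherits its word's label, and the [CLS]/[SEP] slots get -100.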


# Label mapping (note: there is no "O"/outside class, so every word in the data
# is assumed to belong to one of these entity types)
label2id = {
    "TIME": 0,
    "SCHOOL": 1,
    "COLLEGE": 2,
    "MAJOR": 3,
    "ORG": 4,
}
id2label = {v: k for k, v in label2id.items()}

# Load the data
train_data = load_data("data/train.json")
val_data = load_data("data/val.json")

# Convert to Hugging Face Datasets
train_dataset = Dataset.from_dict({
    "text": [item["text"] for item in train_data],
    "labels": [item["labels"] for item in train_data],
})
val_dataset = Dataset.from_dict({
    "text": [item["text"] for item in val_data],
    "labels": [item["labels"] for item in val_data],
})

# Load the pretrained model and tokenizer
model_name = "bert-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(
    model_name, num_labels=len(label2id), id2label=id2label, label2id=label2id
)

# Preprocess both splits
tokenized_train_dataset = train_dataset.map(
    tokenize_and_align_labels, fn_kwargs={"tokenizer": tokenizer}, batched=True
)
tokenized_val_dataset = val_dataset.map(
    tokenize_and_align_labels, fn_kwargs={"tokenizer": tokenizer}, batched=True
)

# Define the training arguments
training_args = TrainingArguments(
    output_dir="./models/resume_ner_model",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
    save_strategy="epoch",
    save_total_limit=2,
    logging_dir="./logs",
    logging_steps=10,
)
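
# Optional: a simple token-level accuracy metric (a sketch, not part of the
# original pipeline; pass it as compute_metrics=compute_metrics to the Trainer
# below to report it each epoch). Positions labeled -100 are excluded.
import numpy as np

def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=-1)
    mask = labels != -100  # drop special tokens and padding
    accuracy = (predictions[mask] == labels[mask]).mean()
    return {"token_accuracy": float(accuracy)}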

# Set up the Trainer
data_collator = DataCollatorForTokenClassification(tokenizer)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train_dataset,
    eval_dataset=tokenized_val_dataset,
    tokenizer=tokenizer,
    data_collator=data_collator,
)

# Start training
trainer.train()

# Save the model and tokenizer
trainer.save_model("./models/resume_ner_model")
tokenizer.save_pretrained("./models/resume_ner_model")
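
# Quick sanity check with the saved model (a sketch: the pipeline API is
# standard transformers, but the example sentence is made up)
from transformers import pipeline

ner = pipeline("token-classification", model="./models/resume_ner_model")
print(ner("2015年毕业于清华大学计算机学院软件工程专业"))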