张献维
2 weeks ago
commit
dd41af7db8
9 changed files with 291 additions and 0 deletions
@ -0,0 +1 @@
/.idea/
@ -0,0 +1,21 @@
[
  {
    "text": "2001.09--2005.07 佳木斯大学中文系汉语言文学专业学生",
    "labels": [
      {"start": 0, "end": 7, "label": "TIME"},
      {"start": 9, "end": 16, "label": "TIME"},
      {"start": 17, "end": 22, "label": "SCHOOL"},
      {"start": 22, "end": 25, "label": "COLLEGE"},
      {"start": 25, "end": 32, "label": "MAJOR"}
    ]
  },
  {
    "text": "2005.07--2006.07 黑龙江省大庆市工商行政管理局机关党委试用期人员",
    "labels": [
      {"start": 0, "end": 7, "label": "TIME"},
      {"start": 9, "end": 16, "label": "TIME"},
      {"start": 17, "end": 31, "label": "ORG"},
      {"start": 31, "end": 35, "label": "ORG"}
    ]
  }
]
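The offsets are zero-based character indices into `text` with exclusive ends (matching the TIME spans). A minimal sanity check, assuming the `data/train.json` path from the README, slices each annotated span back out of its text:

```python
import json

# Hypothetical quick check: print every annotated entity next to its label;
# "end" is exclusive, so a plain slice recovers the entity text
with open("data/train.json", encoding="utf-8") as f:
    records = json.load(f)

for record in records:
    for span in record["labels"]:
        print(span["label"], record["text"][span["start"]:span["end"]])
```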
@ -0,0 +1,89 @@
import re

# Input text
text = """
2002.09--2006.07 曲阜师范大学历史文化学院旅游管理专业学习

2006.07--2007.01 待业

2007.01--2008.01 滕州市旅游局见习期人员

2008.01--2017.07 滕州市旅游和服务业发展局科员

2017.07--2019.10 滕州市环境监察大队科员

2019.10--2020.12 滕州市生态环境保护综合执法大队科员

2020.12--2021.04 滕州市生态环境保护综合执法大队一级行政执法员

2021.04-- 枣庄市生态环境保护综合执法支队滕州市生态环境保护综合执法大队一级行政执法员
"""

# Match each entry; the end date is optional so that open-ended
# entries such as "2021.04--" are captured as well
pattern = r"(\d{4}\.\d{2})--(\d{4}\.\d{2}|至今)?\s*([^\n]+)"

# Find all matches
matches = re.findall(pattern, text)

# Labeling rules
def get_label_and_details(content):
    if "学习" in content:
        # Match school, college, and major; the (?:...) group keeps the
        # 大学/学院 alternation attached to the preceding characters
        school_pattern = r"([\u4e00-\u9fa5]+?(?:大学|学院))([\u4e00-\u9fa5]+学院)?([\u4e00-\u9fa5]+专业)?"
        school_match = re.search(school_pattern, content)
        if school_match:
            details = {
                "学校": school_match.group(1) or "",
                "学院": school_match.group(2) or "",
                "专业": school_match.group(3) or ""
            }
        else:
            details = {}
        return "教育经历", details
    if "待业" in content:
        return "待业", {}
    # Match the organization name; a character class like [局|队|公司|集团]
    # would match single characters (including "|"), so use an alternation
    unit_pattern = r"([\u4e00-\u9fa5]+(?:局|队|公司|集团))"
    unit_match = re.search(unit_pattern, content)
    details = {"单位": unit_match.group(1)} if unit_match else {}
    if "见习" in content:
        return "见习经历", details
    return "工作经历", details

# Assemble the results
results = []
for start_time, end_time, content in matches:
    label, details = get_label_and_details(content)
    result = {
        "开始时间": start_time,
        "结束时间": end_time or "至今",  # an empty end date means "to present"
        "主要内容": content.strip(),
        "标签": label,
        **details  # merge the extracted names into the result
    }
    results.append(result)

# Print the results
for result in results:
    print(result)
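For reference, the first entry should then print roughly as follows (illustrative, traced by hand rather than captured from a run):

```
{'开始时间': '2002.09', '结束时间': '2006.07', '主要内容': '曲阜师范大学历史文化学院旅游管理专业学习', '标签': '教育经历', '学校': '曲阜师范大学', '学院': '历史文化学院', '专业': '旅游管理专业'}
```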
@ -0,0 +1,3 @@
transformers==4.26.0
datasets==2.10.0
torch==1.13.0
@ -0,0 +1,30 @@
from transformers import pipeline

# Load the fine-tuned model
ner_pipeline = pipeline("ner", model="./models/resume_ner_model", tokenizer="./models/resume_ner_model")


# Parse text
def parse_resume(text):
    results = ner_pipeline(text)
    parsed_data = []
    current_entity = {}
    for result in results:
        if result["entity"].startswith("B-"):
            if current_entity:
                parsed_data.append(current_entity)
            current_entity = {
                "label": result["entity"][2:],
                "text": result["word"]
            }
        elif result["entity"].startswith("I-") and current_entity:
            # Guard against a stray I- tag with no preceding B-, and strip
            # BERT word-piece markers before concatenating
            current_entity["text"] += result["word"].replace("##", "")
    if current_entity:
        parsed_data.append(current_entity)
    return parsed_data


# Example
text = "2001.09--2005.07 佳木斯大学中文系汉语言文学专业学生"
results = parse_resume(text)
print(results)
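As an alternative to the manual B-/I- merging above, the transformers pipeline can group word pieces into whole entities itself; a minimal sketch using the same model path:

```python
from transformers import pipeline

# aggregation_strategy="simple" merges B-/I- pieces into complete entities
ner_pipeline = pipeline(
    "ner",
    model="./models/resume_ner_model",
    tokenizer="./models/resume_ner_model",
    aggregation_strategy="simple",
)
print(ner_pipeline("2001.09--2005.07 佳木斯大学中文系汉语言文学专业学生"))
```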
@ -0,0 +1,104 @@
import json
from datasets import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForTokenClassification,
    TrainingArguments,
    Trainer,
    DataCollatorForTokenClassification
)

# Load data
def load_data(file_path):
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return data

# Preprocessing: the annotations are character-level spans, so split each
# text into characters, expand the spans into per-character BIO tags, and
# align those tags with the tokenizer's word pieces
def tokenize_and_align_labels(examples, tokenizer):
    words = [list(text) for text in examples["text"]]
    tokenized_inputs = tokenizer(words, truncation=True, is_split_into_words=True)
    labels = []
    for i in range(len(words)):
        spans = examples["labels"][i]
        # datasets may hand spans back as a list of dicts or as a dict of
        # lists; normalize to a list of dicts
        if isinstance(spans, dict):
            spans = [
                {"start": s, "end": e, "label": l}
                for s, e, l in zip(spans["start"], spans["end"], spans["label"])
            ]
        # Per-character label ids; span ends are exclusive
        char_labels = {}
        for span in spans:
            for pos in range(span["start"], span["end"]):
                prefix = "B-" if pos == span["start"] else "I-"
                char_labels[pos] = label2id[prefix + span["label"]]
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        label_ids = []
        for word_idx in word_ids:
            if word_idx is None:
                label_ids.append(-100)  # special tokens are ignored by the loss
            else:
                label_ids.append(char_labels.get(word_idx, label2id["O"]))
        labels.append(label_ids)
    tokenized_inputs["labels"] = labels
    return tokenized_inputs

# Label mapping in BIO form: "O" marks characters outside any entity, and
# the B-/I- prefixes are what predict.py groups entities on
entity_types = ["TIME", "SCHOOL", "COLLEGE", "MAJOR", "ORG"]
label2id = {"O": 0}
for entity_type in entity_types:
    label2id[f"B-{entity_type}"] = len(label2id)
    label2id[f"I-{entity_type}"] = len(label2id)
id2label = {v: k for k, v in label2id.items()}

# Load data
train_data = load_data("data/train.json")
val_data = load_data("data/val.json")

# Convert to Hugging Face Datasets
train_dataset = Dataset.from_dict({
    "text": [item["text"] for item in train_data],
    "labels": [item["labels"] for item in train_data]
})
val_dataset = Dataset.from_dict({
    "text": [item["text"] for item in val_data],
    "labels": [item["labels"] for item in val_data]
})

# Load the pretrained model and tokenizer
model_name = "bert-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(
    model_name, num_labels=len(label2id), id2label=id2label, label2id=label2id
)

# Preprocess the datasets; the raw columns are dropped so that the new
# integer "labels" column replaces the span annotations cleanly, and
# padding is left to the data collator
tokenized_train_dataset = train_dataset.map(
    tokenize_and_align_labels, fn_kwargs={"tokenizer": tokenizer},
    batched=True, remove_columns=train_dataset.column_names
)
tokenized_val_dataset = val_dataset.map(
    tokenize_and_align_labels, fn_kwargs={"tokenizer": tokenizer},
    batched=True, remove_columns=val_dataset.column_names
)

# Training arguments
training_args = TrainingArguments(
    output_dir="./models/resume_ner_model",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
    save_strategy="epoch",
    save_total_limit=2,
    logging_dir="./logs",
    logging_steps=10,
)

# Trainer with dynamic padding from the token-classification collator
data_collator = DataCollatorForTokenClassification(tokenizer)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train_dataset,
    eval_dataset=tokenized_val_dataset,
    tokenizer=tokenizer,
    data_collator=data_collator,
)

# Start training
trainer.train()

# Save the model
trainer.save_model("./models/resume_ner_model")
tokenizer.save_pretrained("./models/resume_ner_model")
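The Trainer above reports only the evaluation loss. If entity-level precision/recall/F1 are wanted, a `compute_metrics` hook along these lines could be passed to the Trainer; this is a sketch assuming the `evaluate` and `seqeval` packages, which are not listed in requirements.txt:

```python
import numpy as np
import evaluate  # assumption: pip install evaluate seqeval

seqeval = evaluate.load("seqeval")

def compute_metrics(eval_pred):
    logits, label_rows = eval_pred
    predictions = np.argmax(logits, axis=-1)
    # Drop the -100 positions and map ids back to BIO tag strings
    references = [
        [id2label[l] for l in row if l != -100]
        for row in label_rows
    ]
    predicted = [
        [id2label[p] for p, l in zip(pred_row, row) if l != -100]
        for pred_row, row in zip(predictions, label_rows)
    ]
    scores = seqeval.compute(predictions=predicted, references=references)
    return {
        "precision": scores["overall_precision"],
        "recall": scores["overall_recall"],
        "f1": scores["overall_f1"],
    }
```

It would be wired in by passing `compute_metrics=compute_metrics` when constructing the Trainer.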
@ -0,0 +1,43 @@
Project layout

```markdown
resume_parser/
│
├── data/
│   ├── train.json          # training data
│   ├── val.json            # validation data
│
├── models/
│   ├── resume_ner_model/   # saved fine-tuned model
│
├── scripts/
│   ├── train.py            # training script
│   ├── predict.py          # prediction script
│
├── requirements.txt        # dependency list
├── README.md               # project description
```

## Usage

### Install dependencies
```shell
pip install -r requirements.txt
```

### Train the model

```shell
python scripts/train.py
```

### Parse text with the model
```shell
python scripts/predict.py
```

Saving the model: `scripts/train.py` writes the fine-tuned model and tokenizer to `./models/resume_ner_model` when training finishes, so no separate step is needed.
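To load the saved model outside the pipeline helper, for example from a custom service, a minimal sketch:

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

# Load the fine-tuned model written by scripts/train.py
tokenizer = AutoTokenizer.from_pretrained("./models/resume_ner_model")
model = AutoModelForTokenClassification.from_pretrained("./models/resume_ner_model")
```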