Commit 4e17d567
Authored 2 weeks ago by Ruben Bimberg 💻

Remove celadon

Parent: 63b689b9
No related merge requests found
Pipeline #144137 failed with stages in 38 seconds
Changes: 2, Pipelines: 1
Showing 2 changed files with 0 additions and 48 deletions:

  .gitignore              +0 −1
  app/routes/moderate.py  +0 −47
.gitignore (+0 −1)

@@ -5,5 +5,4 @@ __pycache__
 /test
 /train/distilbert-token-swearword
 /train/celadon
-/app/celadon
 /.key
\ No newline at end of file
app/routes/moderate.py (+0 −47)

 from fastapi import APIRouter, Body, Request
 from app.security.oauth2 import ROOM_DEPENDENCIES, per_req_config_modifier
-from app.celadon.model import MultiHeadDebertaForSequenceClassification
-from transformers import AutoTokenizer
 from detoxify import Detoxify
 from openai import OpenAI
 from more_itertools import chunked
 from transformers import pipeline

-celadon_tokenizer = AutoTokenizer.from_pretrained("PleIAs/celadon")
-celadon = MultiHeadDebertaForSequenceClassification.from_pretrained("PleIAs/celadon")
-celadon.eval()
 detox = Detoxify("multilingual")
 sentiment_pipeline = pipeline(
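For reference, the two moderation backends that survive this commit are Detoxify and a transformers sentiment pipeline. Below is a minimal sketch of the Detoxify call that the module's run_detox helper wraps, assuming only the "multilingual" checkpoint named in the diff; the sample texts are illustrative, not repo data:

from detoxify import Detoxify

# predict() accepts a single string or a list of strings and returns a
# dict mapping label names ("toxicity", "insult", ...) to a score or a
# per-text list of scores
detox = Detoxify("multilingual")
scores = detox.predict(["you are wonderful", "you are awful"])
print(scores["toxicity"])  # one float per input text, in input order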
@@ -169,42 +163,6 @@ def run_detox(text_list):
     return [{k: v[i] for (k, v) in result_dict.items()} for i in range(length)]
-def run_celadon(text_list):
-    LABEL_MAX = 3
-    inputs = celadon_tokenizer(text_list, return_tensors="pt", padding=True, truncation=True)
-    outputs = celadon(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
-    predictions = outputs.argmax(dim=-1).squeeze()
-    iterations = predictions.size()[0]
-    if len(predictions.size()) == 1:
-        iterations = 1
-        predictions = [predictions]
-    result = [
-        {
-            "Race/Origin": predictions[i][0].item() / LABEL_MAX,
-            "Gender/Sex": predictions[i][1].item() / LABEL_MAX,
-            "Religion": predictions[i][2].item() / LABEL_MAX,
-            "Ability": predictions[i][3].item() / LABEL_MAX,
-            "Violence": predictions[i][4].item() / LABEL_MAX,
-        }
-        for i in range(iterations)
-    ]
-    for result_item in result:
-        summed_values = sum(result_item.values())
-        if summed_values > 2:
-            result_item["Flagged"] = "Toxic"
-        elif summed_values > 1 or max(result_item.values()) == 1:
-            result_item["Flagged"] = "Mild"
-        else:
-            result_item["Flagged"] = "No"
-    return result
 def run_moderate(text_list, api_key):
     result = {}
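The removed run_celadon scored each text on five categories, each head emitting a raw severity from 0 to 3 that is normalized by LABEL_MAX = 3, then derived a flag from the sum: above 2 means "Toxic", above 1 (or any single category at the normalized maximum of 1.0) means "Mild", otherwise "No". A standalone re-run of that thresholding on fabricated scores (the numbers are illustrative; the category names and cutoffs are copied from the deleted code):

LABEL_MAX = 3

def flag(scores: dict) -> str:
    # thresholds copied from the removed run_celadon
    total = sum(scores.values())
    if total > 2:
        return "Toxic"
    if total > 1 or max(scores.values()) == 1:
        return "Mild"
    return "No"

sample = {
    "Race/Origin": 0 / LABEL_MAX,
    "Gender/Sex": 3 / LABEL_MAX,  # one head at maximum raw severity
    "Religion": 0 / LABEL_MAX,
    "Ability": 0 / LABEL_MAX,
    "Violence": 1 / LABEL_MAX,
}
print(flag(sample))  # "Mild": the total is ~1.33 and one category hit 1.0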
@@ -213,11 +171,6 @@ def run_moderate(text_list, api_key):
         result_list.extend(run_detox(texts))
     result["detoxify"] = result_list
-    result_list = []
-    for texts in chunked(text_list, 2):
-        result_list.extend(run_celadon(texts))
-    result["celadon"] = result_list
     if api_key is not None:
         result_list = []
         for texts in chunked(text_list, 32):
...
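Both surviving loops in run_moderate batch their inputs with more_itertools.chunked (a chunk size of 32 is visible for the api_key branch above; the Detoxify batch size and the body of the api_key branch sit outside the shown context). A minimal sketch of that batching pattern, using a hypothetical stand-in scorer since the real calls are run_detox and the collapsed OpenAI path:

from more_itertools import chunked

def score_batch(texts):
    # hypothetical stand-in for run_detox or the collapsed API call
    return [{"length": len(t)} for t in texts]

text_list = ["a", "bb", "ccc", "dddd", "eeeee"]
result_list = []
for texts in chunked(text_list, 2):  # yields lists of at most 2 items
    result_list.extend(score_batch(texts))
print(len(result_list))  # 5: one result per input text, order preserved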