
Some refactoring: from_tokens -> encode_from_tokens

comfyanonymous committed 2 years ago
commit 81d1f00df3 (branch: pull/525/head)
3 changed files:
  1. comfy/sd.py (10 lines changed)
  2. comfy/sd1_clip.py (6 lines changed)
  3. comfy/sd2_clip.py (2 lines changed)

comfy/sd.py (10 lines changed)

@@ -375,13 +375,9 @@ class CLIP:
     def tokenize(self, text, return_word_ids=False):
         return self.tokenizer.tokenize_with_weights(text, return_word_ids)
 
-    def encode(self, text, from_tokens=False):
+    def encode_from_tokens(self, tokens):
         if self.layer_idx is not None:
             self.cond_stage_model.clip_layer(self.layer_idx)
-        if from_tokens:
-            tokens = text
-        else:
-            tokens = self.tokenizer.tokenize_with_weights(text)
         try:
             self.patcher.patch_model()
             cond = self.cond_stage_model.encode_token_weights(tokens)
@@ -391,6 +387,10 @@ class CLIP:
             raise e
         return cond
 
+    def encode(self, text):
+        tokens = self.tokenizer.tokenize_with_weights(text)
+        return self.encode_from_tokens(tokens)
+
 class VAE:
     def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
         if config is None:
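
For reference, a minimal usage sketch of the split API (an illustration, not code from this commit: the `clip` instance and the prompt string are assumed to come from elsewhere, e.g. a checkpoint loader):

    # One-step path, unchanged for existing callers:
    cond = clip.encode("a photo of a cat")

    # Two-step path enabled by the refactor: tokenize first, optionally
    # inspect or adjust the (token, weight) pairs, then encode them.
    tokens = clip.tokenize("a photo of a cat")
    cond = clip.encode_from_tokens(tokens)

With the default return_word_ids=False, clip.tokenize returns the same weighted-token structure that encode builds internally, so the two paths produce the same conditioning.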

comfy/sd1_clip.py (6 lines changed)

@@ -315,7 +315,7 @@ class SD1Tokenizer:
                     continue
                 #parse word
                 tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])
         #reshape token array to CLIP input size
         batched_tokens = []
         batch = [(self.start_token, 1.0, 0)]
@@ -338,11 +338,11 @@ class SD1Tokenizer:
                         batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
                     #start new batch
                     batch = [(self.start_token, 1.0, 0)]
-                    batched_tokens.append(batch)
+                    batched_tokens.append(batch)
                 else:
                     batch.extend([(t,w,i+1) for t,w in t_group])
                     t_group = []
         #fill last batch
         batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))
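
To make the batching scheme in this hunk easier to follow, here is a standalone sketch of the idea it implements (an illustration under assumptions, not the SD1Tokenizer code itself: the START/END/PAD ids and MAX_LEN are placeholders, and word groups too long for a single sequence, which the real code splits across batches, are not handled):

    START, END, PAD, MAX_LEN = 49406, 49407, 49407, 77   # assumed values

    def pack_token_groups(token_groups):
        """token_groups: list of per-word lists of (token, weight) pairs."""
        batches = []
        batch = [(START, 1.0, 0)]                      # word index 0 = special tokens
        for i, group in enumerate(token_groups):
            if len(group) + len(batch) > MAX_LEN - 1:  # group does not fit: close batch
                remaining = MAX_LEN - len(batch) - 1
                batch.append((END, 1.0, 0))
                batch.extend([(PAD, 1.0, 0)] * remaining)
                batches.append(batch)
                batch = [(START, 1.0, 0)]              # start new batch
            batch.extend([(t, w, i + 1) for t, w in group])
        # fill last batch: end token plus padding up to MAX_LEN
        batch.extend([(END, 1.0, 0)] + [(PAD, 1.0, 0)] * (MAX_LEN - len(batch) - 1))
        batches.append(batch)
        return batches

Each returned batch is exactly MAX_LEN (token, weight, word_index) triples, the fixed CLIP input size the surrounding code reshapes to.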

comfy/sd2_clip.py (2 lines changed)

@@ -1,4 +1,4 @@
-import sd1_clip
+from comfy import sd1_clip
 import torch
 import os
