Cleanup for pos_embeddings
dllehr-amd committed Nov 6, 2023
1 parent 656dd08 commit 767c8e4
Showing 1 changed file with 4 additions and 3 deletions.
7 changes: 4 additions & 3 deletions vllm/model_executor/models/llama.py
@@ -144,7 +144,8 @@ def __init__(
             self.head_dim,
             self.scaling,
             rotary_dim=self.head_dim,
-            num_kv_heads=self.num_kv_heads)
+            num_kv_heads=self.num_kv_heads,
+            max_position=self.max_position_embeddings)
 
 
     def forward(
@@ -179,12 +180,12 @@ def __init__(self, config: LlamaConfig):
             hidden_size=self.hidden_size,
             num_heads=config.num_attention_heads,
             num_kv_heads=config.num_key_value_heads,
+            max_position_embeddings=config.max_position_embeddings
         )
         self.mlp = LlamaMLP(
             hidden_size=self.hidden_size,
             intermediate_size=config.intermediate_size,
-            hidden_act=config.hidden_act,
-            max_position_embeddings=config.max_position_embeddings
+            hidden_act=config.hidden_act
         )
         self.input_layernorm = RMSNorm(config.hidden_size,
                                        eps=config.rms_norm_eps)
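For reference, below is a minimal, self-contained sketch of the parameter flow this commit sets up: the decoder layer passes config.max_position_embeddings to the attention module (and no longer to LlamaMLP), and the attention module forwards it as max_position to its rotary-position-aware attention op. The class names RotaryAttentionStub and LlamaAttentionSketch are illustrative stand-ins rather than vLLM's actual classes, and the idea that max_position sizes the rotary cos/sin cache is an assumption not shown in this diff.

class RotaryAttentionStub:
    """Stand-in for the RoPE attention op that now receives max_position."""

    def __init__(self, num_heads: int, head_dim: int, scaling: float,
                 rotary_dim: int, num_kv_heads: int, max_position: int):
        # Assumption: knowing max_position up front lets the op size its
        # rotary cos/sin cache for the model's full context window.
        self.max_position = max_position


class LlamaAttentionSketch:
    """Simplified stand-in for LlamaAttention after this commit."""

    def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int,
                 max_position_embeddings: int):
        self.head_dim = hidden_size // num_heads
        self.scaling = self.head_dim ** -0.5
        self.num_kv_heads = num_kv_heads
        self.max_position_embeddings = max_position_embeddings
        # Mirrors the first hunk: max_position is now part of the attention
        # op's constructor arguments.
        self.attn = RotaryAttentionStub(num_heads,
                                        self.head_dim,
                                        self.scaling,
                                        rotary_dim=self.head_dim,
                                        num_kv_heads=self.num_kv_heads,
                                        max_position=self.max_position_embeddings)


# Mirrors the second hunk: the decoder layer hands the config value to the
# attention module; the sizes used here are placeholder values.
attn = LlamaAttentionSketch(hidden_size=4096, num_heads=32, num_kv_heads=32,
                            max_position_embeddings=4096)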
