
Commit ee07308

fix current workflow errors
1 parent 1e65f66

9 files changed: +500 -529 lines

examples/llama.swiftui/.gitignore

Lines changed: 1 addition & 1 deletion (the removed and added lines differ only in whitespace)

@@ -1 +1 @@
-xcuserdata
+xcuserdata

examples/llama.swiftui/README.md

Lines changed: 2 additions & 2 deletions (the removed and added lines are identical apart from trailing whitespace)

@@ -1,7 +1,7 @@
 # llama.swiftui

-Local inference of llama.cpp on an iPhone.
-So far I only tested with starcoder 1B model, but it can most likely handle 7B models as well.
+Local inference of llama.cpp on an iPhone.
+So far I only tested with starcoder 1B model, but it can most likely handle 7B models as well.

 https://github.com/bachittle/llama.cpp/assets/39804642/e290827a-4edb-4093-9642-2a5e399ec545
examples/llama.swiftui/llama.cpp.swift/LibLlama.swift

Lines changed: 40 additions & 40 deletions (whitespace-only: every changed line is a blank line losing its trailing whitespace, so the code itself is untouched)

@@ -11,28 +11,28 @@ actor LlamaContext {
     private var context: OpaquePointer
     private var batch: llama_batch
     private var tokens_list: [llama_token]
-
+
     var n_len: Int32 = 512
     var n_cur: Int32 = 0
     var n_decode: Int32 = 0
-
+
     init(model: OpaquePointer, context: OpaquePointer) {
         self.model = model
         self.context = context
         self.tokens_list = []
         self.batch = llama_batch_init(512, 0, 1)
     }
-
+
     deinit {
         llama_free(context)
         llama_free_model(model)
         llama_backend_free()
     }
-
+
     static func createContext(path: String) throws -> LlamaContext {
         llama_backend_init(false)
         let model_params = llama_model_default_params()
-
+
         let model = llama_load_model_from_file(path, model_params)
         guard let model else {
             print("Could not load model at \(path)")
@@ -43,41 +43,41 @@ actor LlamaContext {
         ctx_params.n_ctx = 2048
         ctx_params.n_threads = 8
         ctx_params.n_threads_batch = 8
-
+
         let context = llama_new_context_with_model(model, ctx_params)
         guard let context else {
             print("Could not load context!")
             throw LlamaError.couldNotInitializeContext
         }
-
+
         return LlamaContext(model: model, context: context)
     }
-
+
     func get_n_tokens() -> Int32 {
         return batch.n_tokens;
     }
-
+
     func completion_init(text: String) {
         print("attempting to complete \"\(text)\"")
-
+
         tokens_list = tokenize(text: text, add_bos: true)
-
+
         let n_ctx = llama_n_ctx(context)
         let n_kv_req = tokens_list.count + (Int(n_len) - tokens_list.count)
-
+
         print("\n n_len = \(n_len), n_ctx = \(n_ctx), n_kv_req = \(n_kv_req)")

         if n_kv_req > n_ctx {
             print("error: n_kv_req > n_ctx, the required KV cache size is not big enough")
         }
-
+
         for id in tokens_list {
             print(token_to_piece(token: id))
         }
-
+
         // batch = llama_batch_init(512, 0) // done in init()
         batch.n_tokens = Int32(tokens_list.count)
-
+
         for i1 in 0..<batch.n_tokens {
             let i = Int(i1)
             batch.token[i] = tokens_list[i]
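One detail in completion_init worth calling out: n_kv_req is computed as the prompt length plus the remaining budget up to n_len, which algebraically always reduces to n_len no matter how long the prompt is. A tiny worked check, with an illustrative prompt length:

    let n_len: Int32 = 512
    let promptTokens = 12                                   // hypothetical prompt length
    let n_kv_req = promptTokens + (Int(n_len) - promptTokens)
    assert(n_kv_req == Int(n_len))                          // holds for any prompt length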
@@ -87,90 +87,90 @@ actor LlamaContext {
             batch.logits[i] = 0
         }
         batch.logits[Int(batch.n_tokens) - 1] = 1 // true
-
+
         if llama_decode(context, batch) != 0 {
             print("llama_decode() failed")
         }
-
+
         n_cur = batch.n_tokens
     }
-
+
     func completion_loop() -> String {
         var new_token_id: llama_token = 0
-
+
         let n_vocab = llama_n_vocab(model)
         let logits = llama_get_logits_ith(context, batch.n_tokens - 1)
-
+
         var candidates = Array<llama_token_data>()
         candidates.reserveCapacity(Int(n_vocab))
-
+
         for token_id in 0..<n_vocab {
             candidates.append(llama_token_data(id: token_id, logit: logits![Int(token_id)], p: 0.0))
         }
         candidates.withUnsafeMutableBufferPointer() { buffer in
             var candidates_p = llama_token_data_array(data: buffer.baseAddress, size: buffer.count, sorted: false)
-
+
             new_token_id = llama_sample_token_greedy(context, &candidates_p)
         }
-
+
         if new_token_id == llama_token_eos(context) || n_cur == n_len {
             print("\n")
             return ""
         }
-
+
         let new_token_str = token_to_piece(token: new_token_id)
         print(new_token_str)
         // tokens_list.append(new_token_id)
-
+
         batch.n_tokens = 0
-
+
         batch.token[Int(batch.n_tokens)] = new_token_id
         batch.pos[Int(batch.n_tokens)] = n_cur
         batch.n_seq_id[Int(batch.n_tokens)] = 1
         batch.seq_id[Int(batch.n_tokens)]![0] = 0
         batch.logits[Int(batch.n_tokens)] = 1 // true
         batch.n_tokens += 1
-
+
         n_decode += 1
-
+
         n_cur += 1
-
+
         if llama_decode(context, batch) != 0 {
             print("failed to evaluate llama!")
         }
-
+
         return new_token_str
     }
-
+
     func clear() {
         tokens_list.removeAll()
     }
-
+
     private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
         let n_tokens = text.count + (add_bos ? 1 : 0)
         let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
         let tokenCount = llama_tokenize(model, text, Int32(text.count), tokens, Int32(n_tokens), add_bos, false)
-
+
         var swiftTokens: [llama_token] = []
         for i in 0..<tokenCount {
             swiftTokens.append(tokens[Int(i)])
         }
-
+
         tokens.deallocate()
-
+
         return swiftTokens
     }
-
+
     private func token_to_piece(token: llama_token) -> String {
         let result = UnsafeMutablePointer<Int8>.allocate(capacity: 8)
         result.initialize(repeating: Int8(0), count: 8)
-
+
         let _ = llama_token_to_piece(model, token, result, 8)
-
+
         let resultStr = String(cString: result)
-
+
         result.deallocate()
-
+
         return resultStr
     }
 }
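Taken together, completion_init and completion_loop form a two-phase generation API: one llama_decode call over the whole prompt (with logits requested only for the last position), then one greedily sampled token per completion_loop call until EOS or the n_len cap. A hedged driver sketch, not part of this commit; runCompletion is a hypothetical helper:

    func runCompletion(_ llama: LlamaContext, prompt: String) async -> String {
        await llama.completion_init(text: prompt)     // one decode over the whole prompt
        var output = ""
        while true {
            let piece = await llama.completion_loop() // samples and decodes one more token
            if piece.isEmpty { break }                // "" signals EOS or the n_len cap
            output += piece
        }
        await llama.clear()
        return output
    }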
