Files & Batches
File Operations
Upload, retrieve, list, and delete files. Files are used with batch processing, fine-tuning, and assistants.
Upload a File
Python

import asyncio
import os

from liter_llm import LlmClient


async def main() -> None:
    client = LlmClient(api_key=os.environ["OPENAI_API_KEY"])
    with open("data.jsonl", "rb") as f:
        file_bytes = f.read()
    response = await client.create_file(
        file=file_bytes,
        filename="data.jsonl",
        purpose="batch",
    )
    print(f"File ID: {response.id}")
    print(f"Size: {response.bytes} bytes")


asyncio.run(main())
TypeScript

import { LlmClient } from "@kreuzberg/liter-llm";
import { readFileSync } from "node:fs";

const client = new LlmClient({ apiKey: process.env.OPENAI_API_KEY! });
const fileBuffer = readFileSync("data.jsonl");
const response = await client.createFile({
  file: fileBuffer,
  filename: "data.jsonl",
  purpose: "batch",
});
console.log(`File ID: ${response.id}`);
console.log(`Size: ${response.bytes} bytes`);
Rust

use liter_llm::{ClientConfigBuilder, CreateFileRequest, DefaultClient, LlmClient};
use tokio::fs;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = ClientConfigBuilder::new(std::env::var("OPENAI_API_KEY")?)
        .build();
    let client = DefaultClient::new(config, Some("openai/gpt-4o"))?;
    let file_bytes = fs::read("data.jsonl").await?;
    let response = client
        .create_file(CreateFileRequest {
            file: file_bytes,
            filename: "data.jsonl".into(),
            purpose: "batch".into(),
        })
        .await?;
    println!("File ID: {}", response.id);
    println!("Size: {} bytes", response.bytes);
    Ok(())
}
Go

package main

import (
    "context"
    "fmt"
    "os"

    llm "github.com/kreuzberg-dev/liter-llm/packages/go"
)

func main() {
    client := llm.NewClient(llm.WithAPIKey(os.Getenv("OPENAI_API_KEY")))
    fileBytes, err := os.ReadFile("data.jsonl")
    if err != nil {
        panic(err)
    }
    resp, err := client.CreateFile(context.Background(), &llm.CreateFileRequest{
        File:     fileBytes,
        Filename: "data.jsonl",
        Purpose:  "batch",
    })
    if err != nil {
        panic(err)
    }
    fmt.Printf("File ID: %s\n", resp.ID)
    fmt.Printf("Size: %d bytes\n", resp.Bytes)
}
Java

import dev.kreuzberg.literllm.LlmClient;
import dev.kreuzberg.literllm.Types.*;

import java.nio.file.Files;
import java.nio.file.Path;

public class Main {
    public static void main(String[] args) throws Exception {
        try (var client = LlmClient.builder()
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .build()) {
            byte[] fileBytes = Files.readAllBytes(Path.of("data.jsonl"));
            var response = client.createFile(new CreateFileRequest(
                fileBytes,
                "data.jsonl",
                "batch"
            ));
            System.out.println("File ID: " + response.id());
            System.out.println("Size: " + response.bytes() + " bytes");
        }
    }
}
C#

using LiterLlm;

await using var client = new LlmClient(
    apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY")!);
var fileBytes = await File.ReadAllBytesAsync("data.jsonl");
var response = await client.CreateFileAsync(new CreateFileRequest(
    File: fileBytes,
    Filename: "data.jsonl",
    Purpose: "batch"
));
Console.WriteLine($"File ID: {response.Id}");
Console.WriteLine($"Size: {response.Bytes} bytes");
Ruby

# frozen_string_literal: true

require "liter_llm"
require "json"

client = LiterLlm::LlmClient.new(ENV.fetch("OPENAI_API_KEY"), {})
file_bytes = File.binread("data.jsonl")
response = JSON.parse(client.create_file(JSON.generate(
  filename: "data.jsonl",
  purpose: "batch"
), file_bytes))
puts "File ID: #{response["id"]}"
puts "Size: #{response["bytes"]} bytes"
PHP

<?php

declare(strict_types=1);

use LiterLlm\LlmClient;

$client = new LlmClient(apiKey: getenv('OPENAI_API_KEY') ?: '');
$fileBytes = file_get_contents('data.jsonl');
$response = json_decode($client->createFile(json_encode([
    'filename' => 'data.jsonl',
    'purpose' => 'batch',
]), $fileBytes), true);
echo "File ID: {$response['id']}" . PHP_EOL;
echo "Size: {$response['bytes']} bytes" . PHP_EOL;
Elixir

file_bytes = File.read!("data.jsonl")

{:ok, response} =
  LiterLlm.create_file(
    %{
      file: file_bytes,
      filename: "data.jsonl",
      purpose: "batch"
    },
    api_key: System.fetch_env!("OPENAI_API_KEY")
  )

IO.puts("File ID: #{response["id"]}")
IO.puts("Size: #{response["bytes"]} bytes")
WASM

import init, { LlmClient } from "@kreuzberg/liter-llm-wasm";

await init();
const client = new LlmClient({ apiKey: process.env.OPENAI_API_KEY! });
const fileBuffer = new Uint8Array(/* file bytes */);
const response = await client.createFile({
  file: fileBuffer,
  filename: "data.jsonl",
  purpose: "batch",
});
console.log(`File ID: ${response.id}`);
console.log(`Size: ${response.bytes} bytes`);
File Methods

Method         Description
create_file    Upload a file with a purpose ("batch", "fine-tune", "assistants")
retrieve_file  Get metadata for an uploaded file by ID
delete_file    Delete an uploaded file by ID
list_files     List all uploaded files, optionally filtered by purpose
file_content   Download the raw content of an uploaded file
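The remaining file methods follow the same async pattern as create_file. A minimal lifecycle sketch in Python; the positional file-ID arguments, the purpose filter, and the attribute names on the returned objects are assumptions based on the table and the upload example above, not confirmed signatures:

import asyncio
import os

from liter_llm import LlmClient


async def main() -> None:
    client = LlmClient(api_key=os.environ["OPENAI_API_KEY"])

    # List files uploaded for batch processing (purpose filter per the table).
    files = await client.list_files(purpose="batch")
    for f in files:
        print(f.id, f.filename)

    # Fetch metadata, download the raw bytes, then delete the file.
    meta = await client.retrieve_file("file-abc123")
    print(f"{meta.filename}: {meta.bytes} bytes")

    content = await client.file_content("file-abc123")
    print(f"Downloaded {len(content)} bytes")

    await client.delete_file("file-abc123")


asyncio.run(main())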
Batch Processing
Create batch jobs to process multiple requests asynchronously at reduced cost:
Python

import asyncio
import os

from liter_llm import LlmClient


async def main() -> None:
    client = LlmClient(api_key=os.environ["OPENAI_API_KEY"])
    response = await client.create_batch(
        input_file_id="file-abc123",
        endpoint="/v1/chat/completions",
        completion_window="24h",
    )
    print(f"Batch ID: {response.id}")
    print(f"Status: {response.status}")


asyncio.run(main())
TypeScript

import { LlmClient } from "@kreuzberg/liter-llm";

const client = new LlmClient({ apiKey: process.env.OPENAI_API_KEY! });
const response = await client.createBatch({
  inputFileId: "file-abc123",
  endpoint: "/v1/chat/completions",
  completionWindow: "24h",
});
console.log(`Batch ID: ${response.id}`);
console.log(`Status: ${response.status}`);
Rust

use liter_llm::{ClientConfigBuilder, CreateBatchRequest, DefaultClient, LlmClient};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = ClientConfigBuilder::new(std::env::var("OPENAI_API_KEY")?)
        .build();
    let client = DefaultClient::new(config, Some("openai/gpt-4o"))?;
    let response = client
        .create_batch(CreateBatchRequest {
            input_file_id: "file-abc123".into(),
            endpoint: "/v1/chat/completions".into(),
            completion_window: "24h".into(),
            ..Default::default()
        })
        .await?;
    println!("Batch ID: {}", response.id);
    println!("Status: {}", response.status);
    Ok(())
}
Go

package main

import (
    "context"
    "fmt"
    "os"

    llm "github.com/kreuzberg-dev/liter-llm/packages/go"
)

func main() {
    client := llm.NewClient(llm.WithAPIKey(os.Getenv("OPENAI_API_KEY")))
    resp, err := client.CreateBatch(context.Background(), &llm.CreateBatchRequest{
        InputFileID:      "file-abc123",
        Endpoint:         "/v1/chat/completions",
        CompletionWindow: "24h",
    })
    if err != nil {
        panic(err)
    }
    fmt.Printf("Batch ID: %s\n", resp.ID)
    fmt.Printf("Status: %s\n", resp.Status)
}
Java

import dev.kreuzberg.literllm.LlmClient;
import dev.kreuzberg.literllm.Types.*;

public class Main {
    public static void main(String[] args) throws Exception {
        try (var client = LlmClient.builder()
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .build()) {
            var response = client.createBatch(new CreateBatchRequest(
                "file-abc123",
                "/v1/chat/completions",
                "24h"
            ));
            System.out.println("Batch ID: " + response.id());
            System.out.println("Status: " + response.status());
        }
    }
}
C#

using LiterLlm;

await using var client = new LlmClient(
    apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY")!);
var response = await client.CreateBatchAsync(new CreateBatchRequest(
    InputFileId: "file-abc123",
    Endpoint: "/v1/chat/completions",
    CompletionWindow: "24h"
));
Console.WriteLine($"Batch ID: {response.Id}");
Console.WriteLine($"Status: {response.Status}");
Ruby

# frozen_string_literal: true

require "liter_llm"
require "json"

client = LiterLlm::LlmClient.new(ENV.fetch("OPENAI_API_KEY"), {})
response = JSON.parse(client.create_batch(JSON.generate(
  input_file_id: "file-abc123",
  endpoint: "/v1/chat/completions",
  completion_window: "24h"
)))
puts "Batch ID: #{response["id"]}"
puts "Status: #{response["status"]}"
PHP

<?php

declare(strict_types=1);

use LiterLlm\LlmClient;

$client = new LlmClient(apiKey: getenv('OPENAI_API_KEY') ?: '');
$response = json_decode($client->createBatch(json_encode([
    'input_file_id' => 'file-abc123',
    'endpoint' => '/v1/chat/completions',
    'completion_window' => '24h',
])), true);
echo "Batch ID: {$response['id']}" . PHP_EOL;
echo "Status: {$response['status']}" . PHP_EOL;
Elixir

{:ok, response} =
  LiterLlm.create_batch(
    %{
      input_file_id: "file-abc123",
      endpoint: "/v1/chat/completions",
      completion_window: "24h"
    },
    api_key: System.fetch_env!("OPENAI_API_KEY")
  )

IO.puts("Batch ID: #{response["id"]}")
IO.puts("Status: #{response["status"]}")
WASM

import init, { LlmClient } from "@kreuzberg/liter-llm-wasm";

await init();
const client = new LlmClient({ apiKey: process.env.OPENAI_API_KEY! });
const response = await client.createBatch({
  inputFileId: "file-abc123",
  endpoint: "/v1/chat/completions",
  completionWindow: "24h",
});
console.log(`Batch ID: ${response.id}`);
console.log(`Status: ${response.status}`);
Batch Methods

Method          Description
create_batch    Create a batch from an uploaded JSONL file
retrieve_batch  Get batch status and results by ID
list_batches    List all batches
cancel_batch    Cancel a running batch
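Batches complete asynchronously, so the usual pattern is to poll retrieve_batch until the job reaches a terminal state and then download the results with file_content. A minimal sketch in Python; the status strings and the output_file_id attribute are assumed to mirror the OpenAI batch lifecycle, not confirmed fields:

import asyncio
import os

from liter_llm import LlmClient


async def wait_for_batch(client: LlmClient, batch_id: str) -> None:
    # Poll until the batch reaches a terminal state. The status strings
    # here are assumptions modeled on the OpenAI batch lifecycle.
    while True:
        batch = await client.retrieve_batch(batch_id)
        if batch.status in ("completed", "failed", "expired", "cancelled"):
            break
        await asyncio.sleep(30)  # batches can take minutes to hours

    if batch.status == "completed":
        # Results are written to an output file; download it via file_content.
        results = await client.file_content(batch.output_file_id)
        print(f"Downloaded {len(results)} bytes of results")
    else:
        print(f"Batch ended with status: {batch.status}")


async def main() -> None:
    client = LlmClient(api_key=os.environ["OPENAI_API_KEY"])
    batch = await client.create_batch(
        input_file_id="file-abc123",
        endpoint="/v1/chat/completions",
        completion_window="24h",
    )
    await wait_for_batch(client, batch.id)


asyncio.run(main())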
Batch Parameters

Parameter          Type    Description
input_file_id      string  ID of the uploaded JSONL file
endpoint           string  API endpoint ("/v1/chat/completions", "/v1/embeddings")
completion_window  string  Processing window ("24h")
metadata           object  Optional key-value metadata
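The file behind input_file_id contains one JSON request per line. A sketch of building such a file in Python, assuming the OpenAI batch input schema (custom_id, method, url, body); check your provider's documentation for the exact field names:

import json

# One request per line; custom_id ties each result back to its request.
requests = [
    {
        "custom_id": f"request-{i}",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-4o",
            "messages": [{"role": "user", "content": prompt}],
        },
    }
    for i, prompt in enumerate(["What is 2 + 2?", "Name a prime number."])
]

with open("data.jsonl", "w") as f:
    for request in requests:
        f.write(json.dumps(request) + "\n")

Each line of the output file echoes its custom_id, which is how results are matched back to requests.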
Responses API
Create, retrieve, and cancel responses via the Responses API:
Python

import asyncio
import os

from liter_llm import LlmClient


async def main() -> None:
    client = LlmClient(api_key=os.environ["OPENAI_API_KEY"])
    response = await client.create_response(
        model="openai/gpt-4o",
        input="Explain quantum computing in one sentence.",
    )
    print(response)


asyncio.run(main())
TypeScript

import { LlmClient } from "@kreuzberg/liter-llm";

const client = new LlmClient({ apiKey: process.env.OPENAI_API_KEY! });
const response = await client.createResponse({
  model: "openai/gpt-4o",
  input: "Explain quantum computing in one sentence.",
});
console.log(response);
Rust

use liter_llm::{ClientConfigBuilder, CreateResponseRequest, DefaultClient, ResponseClient};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = ClientConfigBuilder::new(std::env::var("OPENAI_API_KEY")?)
        .build();
    let client = DefaultClient::new(config, Some("openai/gpt-4o"))?;
    let request = CreateResponseRequest {
        model: "openai/gpt-4o".into(),
        input: Some("Explain quantum computing in one sentence.".into()),
        ..Default::default()
    };
    let response = client.create_response(request).await?;
    println!("{:?}", response);
    Ok(())
}
Go

package main

import (
    "context"
    "fmt"
    "os"

    llm "github.com/kreuzberg-dev/liter-llm/packages/go"
)

func main() {
    client := llm.NewClient(llm.WithAPIKey(os.Getenv("OPENAI_API_KEY")))
    resp, err := client.CreateResponse(context.Background(), &llm.CreateResponseRequest{
        Model: "openai/gpt-4o",
        Input: "Explain quantum computing in one sentence.",
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(resp)
}
Java

import dev.kreuzberg.literllm.LlmClient;
import dev.kreuzberg.literllm.Types.*;

public class Main {
    public static void main(String[] args) throws Exception {
        try (var client = LlmClient.builder()
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .build()) {
            var response = client.createResponse(new CreateResponseRequest(
                "openai/gpt-4o",
                "Explain quantum computing in one sentence."
            ));
            System.out.println(response);
        }
    }
}
C#

using LiterLlm;

await using var client = new LlmClient(
    apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY")!);
var response = await client.CreateResponseAsync(new CreateResponseRequest(
    Model: "openai/gpt-4o",
    Input: "Explain quantum computing in one sentence."
));
Console.WriteLine(response);
Ruby

# frozen_string_literal: true

require "liter_llm"
require "json"

client = LiterLlm::LlmClient.new(ENV.fetch("OPENAI_API_KEY"), {})
response = JSON.parse(client.create_response(JSON.generate(
  model: "openai/gpt-4o",
  input: "Explain quantum computing in one sentence."
)))
puts response
PHP

<?php

declare(strict_types=1);

use LiterLlm\LlmClient;

$client = new LlmClient(apiKey: getenv('OPENAI_API_KEY') ?: '');
$response = json_decode($client->createResponse(json_encode([
    'model' => 'openai/gpt-4o',
    'input' => 'Explain quantum computing in one sentence.',
])), true);
print_r($response);
Elixir

{:ok, response} =
  LiterLlm.create_response(
    %{
      model: "openai/gpt-4o",
      input: "Explain quantum computing in one sentence."
    },
    api_key: System.fetch_env!("OPENAI_API_KEY")
  )

IO.inspect(response)
WASM

import init, { LlmClient } from "@kreuzberg/liter-llm-wasm";

await init();
const client = new LlmClient({ apiKey: "sk-..." });
const response = await client.createResponse({
  model: "openai/gpt-4o",
  input: "Explain quantum computing in one sentence.",
});
console.log(response);
Response Methods

Method             Description
create_response    Create a new response
retrieve_response  Get a response by ID
cancel_response    Cancel a response
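retrieve_response and cancel_response take the ID returned by create_response. A minimal sketch in Python; the positional ID argument and the placeholder ID are assumptions for illustration:

import asyncio
import os

from liter_llm import LlmClient


async def main() -> None:
    client = LlmClient(api_key=os.environ["OPENAI_API_KEY"])

    # Fetch a previously created response by ID (placeholder ID).
    response = await client.retrieve_response("resp-abc123")
    print(response)

    # Cancel a response that is still in progress.
    cancelled = await client.cancel_response("resp-abc123")
    print(cancelled)


asyncio.run(main())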
Response Parameters

Parameter          Type    Description
model              string  Model to use
input              string  Input text or conversation
instructions       string  System-level instructions
max_output_tokens  int     Maximum tokens to generate
temperature        float   Sampling temperature
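The optional parameters combine with create_response as plain keyword arguments. A sketch in Python; the parameter names come from the table above, and the call shape is assumed to match the earlier create_response example:

import asyncio
import os

from liter_llm import LlmClient


async def main() -> None:
    client = LlmClient(api_key=os.environ["OPENAI_API_KEY"])
    response = await client.create_response(
        model="openai/gpt-4o",
        input="Summarize the plot of Hamlet.",
        instructions="Answer in exactly two sentences.",  # system-level steering
        max_output_tokens=200,  # hard cap on generated tokens
        temperature=0.2,        # low temperature for a focused summary
    )
    print(response)


asyncio.run(main())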