I am using Hugging Face Inference, and I am getting Error 400: {"error":"'str' object cannot be interpreted as an integer"}


When I use the same template locally, outputs are generated, but when I hit the API I get this error.
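For context, locally the same template produces output with a call like the following (a minimal sketch following the usual NuExtract transformers pattern; the model name, length limits, and sample text here are illustrative assumptions):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "numind/NuExtract"  # assumed checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, trust_remote_code=True
)
model.eval()

text = "Centrifugal pump, model X100, serial no. 12345."  # sample input
template = '{"Model": [], "SerialNumber/Engine Number/SN": []}'  # abbreviated form of the template below
prompt = f"<|input|>\n### Template:\n{template}\n### Text:\n{text}\n\n<|output|>"

enc = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=10000)
with torch.no_grad():
    out = model.generate(**enc, max_new_tokens=4000)
print(tokenizer.decode(out[0], skip_special_tokens=True).split("<|output|>")[-1].strip())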

template = """{
"Model": [],
"PartNumber/PN": [],
"SerialNumber/Engine Number/SN": [],
"TagNumber": [],
"PartName": "",
"Manufacturer": "",
"PID":[],
"Application/Function": "",
"AdditionalAttributes": []
}"""
import json

import requests
import torch

# endpoint_url and headers (with the API token) are assumed to be defined above

def predict_NuExtract(texts, template):
    # Re-serialize the template so it is valid, consistently indented JSON
    template = json.dumps(json.loads(template), indent=4)
    prompts = [f"""<|input|>\n### Template:\n{template}\n### Text:\n{text}\n\n<|output|>""" for text in texts]

    outputs = []
    batch_size = 1
    max_length = 10000  # generation limits (not currently sent in the request payload)
    max_new_tokens = 4000

    # Process prompts in batches, one request per batch
    with torch.no_grad():
        for i in range(0, len(prompts), batch_size):
            batch_prompts = prompts[i:i + batch_size]
            payload = {"inputs": batch_prompts}
            response = requests.post(endpoint_url, headers=headers, json=payload)

            if response.status_code == 200:
                response_data = response.json()
                outputs += [output.get("generated_text", "").strip() for output in response_data]
            else:
                print(f"Error {response.status_code}: {response.text}")
                raise Exception(f"Request failed with status {response.status_code}: {response.text}")

    # Keep only the text after the final <|output|> marker; using [-1] also
    # works when the endpoint does not echo the prompt back
    return [output.split("<|output|>")[-1].strip() for output in outputs]
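If the endpoint runs a standard text-generation handler (for example text-generation-inference), "inputs" is expected to be a single string rather than a list, so posting batch_prompts as a list is one plausible source of a type error like this. A minimal sketch of a per-prompt request under that assumption, reusing endpoint_url and headers from above (predict_one is a hypothetical helper, and the "parameters" block follows the standard text-generation request format):

def predict_one(prompt):
    # One request per prompt, with "inputs" as a plain string
    payload = {
        "inputs": prompt,  # a single string, not a list
        "parameters": {"max_new_tokens": 4000},
    }
    response = requests.post(endpoint_url, headers=headers, json=payload)
    response.raise_for_status()
    # Same response shape as above: a list of {"generated_text": ...} dicts
    return response.json()[0]["generated_text"].split("<|output|>")[-1].strip()

Usage would then be, e.g., results = [predict_one(p) for p in prompts].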
