Calling the ChatGPT (OpenAI) HTTP API from Python

import os
import requests
import json

proxies={
    "http":"http://127.0.0.1:1083",   # local proxy ("magic") address
    "https":"http://127.0.0.1:1083"   # local proxy ("magic") address
}
skcode="sk-************************"  # OpenAI API key (sk- code)
header={
    "User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/113.0",
    "Authorization":f"Bearer {skcode}",
    "Content-Type": "application/json"
}
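# Optional: instead of hard-coding the key above, it can be loaded from an environment
# variable (a minimal sketch; the variable name OPENAI_API_KEY is only a common
# convention, and the fallback keeps the hard-coded value when it is not set).
skcode = os.environ.get("OPENAI_API_KEY", skcode)
header["Authorization"] = f"Bearer {skcode}"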

class openaihttp:

    """Initialize the client; equivalent to a constructor."""
    def __init__(self):
        pass

    """Process network request status"""
    def LogNetCode(self, status_code):
        errinfo=""
        if status_code == 200:
            errinfo=('Success: {0}'.format(status_code))
        elif status_code == 301 or status_code == 302:
            errinfo=('Redirection: {0}'.format(status_code))
            print('redirect', status_code)
        elif status_code == 404:
            errinfo=('Not found: {0}'.format(status_code))
            print('not found', status_code)
        elif status_code == 401:
            errinfo=('Permission problem, the sk code may be invalid: {0}'.format(status_code))
            print("Permission problem, sk may be invalid", status_code)
        elif status_code == 500:
            errinfo=('Server error: {0}'.format(status_code))
            print('server error', status_code)
        else:
            errinfo=('Other errors: {0}'.format(status_code))
            print('Other errors', status_code)
        return errinfo


    """Get all supported models"""
    def ListModels(self):
        models=[]
        baseUrl="https://api.openai.com/v1/models"
        response=requests.get(baseUrl, headers=header, proxies=proxies, verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        for iter in loads["data"]:
            models.append(iter["id"])
        return models
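    # For reference, a successful /v1/models response is JSON shaped roughly like
    # {"object": "list", "data": [{"id": "text-davinci-003", "object": "model", ...}, ...]},
    # which is why the loop above collects iter["id"]. (Sketch only; fields abridged.)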

    """Get model details"""
    def RetrieveModel(self, modelName):
        baseUrl="https://api.openai.com/v1/models/{model}".format(model=modelName)
        response=requests.get(baseUrl, headers=header, proxies=proxies, verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        return loads
    
        """Completion dialog completions """
    """The input parameter content is the content, and num is the number of acquisitions."""
    """Given a hint, the model will return one or more predicted completions, and may also return the probability of an alternative marker for each position.
    model: string Required The ID of the model to use. You can see all available models using the List models API, or see our models overview for their descriptions.
    prompt:string or arrayOptional Defaults to <|endoftext|> Generate a completed prompt, encoded as a string, an array of strings, an array of tokens, or an array of arrays of tokens. Note that <|endoftext|> is the document delimiter that the model sees during training, so if no hint is specified, the model will generate the beginning of new documents.
    suffix:string Optional Defaults to null A suffix to insert after text completion.
    max_tokens : integer Optional Defaults to 16 Maximum number of tokens to generate on completion. Your hint's token count plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except the latest model, which supports 4096).
    temperature:number Optional Defaults to 1 What sampling temperature to use, between 0 and 2. Higher values (such as 0.8) will make the output more random, while lower values (such as 0.2) will make the output more focused and deterministic. We generally recommend changing this or top_p but not both.
    top_p:number Optional Defaults to 1 An alternative to temperature sampling, known as kernel sampling, where the model considers results from labels with top_p probability mass. So 0.1 means that only tokens constituting the top 10% of probability masses are considered. We generally recommend changing either this value or the temperature, but not both.
    n:integer Optional Defaults to 1 How many completions to generate for each hint. NOTE: Because this parameter generates a lot of completions, it can drain your token quota very quickly. Use with caution and make sure you have sensible settings for max_tokens and stop.
    stream: boolean Optional Defaults to false Whether to stream back part of the progress. If set, the token will be sent as a data-only server-sent event when available, with the stream terminated by a data: [DONE] message.
    logprobs:integer Optional Defaults to null Include log probabilities on the most likely markers in logprobs, and the chosen marker. For example, if logprobs is 5, the API will return a list of the 5 most likely tokens.
        The API will always return the logprob of the sampled token, so there may be at most logprobs + 1 elements in the response. The maximum value for logprobs is 5. If you need more, please contact us via our help center and describe your use case.
    echo:boolean Optional Defaults to false echoes the prompt in addition to completion
    stop:string or array Optional Defaults to null The API will stop generating up to 4 sequences of more tokens. The returned text will not contain stop sequences.
    presence_penalty:number Optional Defaults to 0 A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they have appeared in the text so far, increasing the likelihood that the model talks about new topics.
    frequency_penalty:number Optional Defaults to 0 A number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text, making the model less likely to repeat the same line verbatim.
    best_of: integer Optional Defaults to 1 Generate best_of on the server side to complete and return the "best" (the one with the highest log probability for each token). Failed to stream results. best_of controls the number of candidate completions when used with n, where n specifies how many to return - best_of must be greater than n.
            NOTE: Because this parameter generates a lot of completions, it can drain your token quota very quickly. Use with caution and make sure you have sensible settings for max_tokens and stop.
    logit_bias:map Optional Defaults to null Modifies the likelihood that the specified marker appears in the completion. Accepts a json object that maps a tag (specified by a tag ID in the GPT tagger) to an associated bias value from -100 to 100.
                You can use this tokenizer tool (for GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the log generated by the model before sampling. The exact effect varies by model,
                But values between -1 and 1 should reduce or increase the probability of selection; values like -100 or 100 should result in forbidden or exclusive selection of the associated token.
                For example, you can pass {"50256": -100} to prevent <|endoftext|> tags from being generated.
    user:string Optional A unique identifier representing your end user that can help OpenAI monitor and detect abuse. learn more.
    """
    def CreateCompletion(self, model="text-davinci-003", prompt="Hello! Who are you?", suffix=None, max_tokens=2000, temperature=0.2, top_p=1, n=1,
                         stream=False, logprobs=None, echo=False, stop=None, presence_penalty=0, frequency_penalty=0, best_of=1, logit_bias=dict(), user=""):
        print("[CreateCompletion]:", prompt)
        results=[]

        baseUrl="https://api.openai.com/v1/completions"
        datap={
          "model": model,  # model id
          "prompt": prompt,  # prompt text
          "suffix": suffix,
          "max_tokens": max_tokens,  # maximum number of tokens to generate
          "temperature": temperature,  # lower values give more focused, deterministic output
          "top_p": top_p,  # nucleus sampling cutoff
          "n": n,
          "stream": stream,
          "logprobs": logprobs,
          "echo": echo,
          "stop": stop,
          "presence_penalty": presence_penalty,
          "frequency_penalty": frequency_penalty,
          "best_of": best_of,
          "logit_bias": logit_bias,
          "user": user
        }

        response=requests.post(baseUrl,headers=header,proxies=proxies,data=json.dumps(datap),verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        status_code = response.status_code
        self.LogNetCode(status_code)
        if(status_code!=200):
            print("loads", loads)
            return results
        #print("response", response)
       
        for iter in loads.get("choices"):
            # Check if exited normally
            stopReason = dict(iter).get("finish_reason")
            sttr=(dict(iter).get("text"))
            while stopReason!="stop":
                # The completion was cut off (e.g. finish_reason == "length"),
                # so keep requesting continuations until the model stops or a request fails.
                datap["prompt"]=prompt + sttr
                datap["n"]=1
                responsesub=requests.post(baseUrl,headers=header,proxies=proxies,data=json.dumps(datap),verify=False)
                status_codesub = responsesub.status_code
                self.LogNetCode(status_codesub)
                if(status_codesub!=200):
                    # Stop retrying on error and keep what has been generated so far.
                    break
                loadssub = json.loads(responsesub.content.decode('utf-8'))
                #print("loadssub",loadssub)
                for itersub in loadssub.get("choices"):
                    # Check whether the continuation finished normally
                    stopReason = dict(itersub).get("finish_reason")
                    sttrsub=(dict(itersub).get("text"))
                    sttr=sttr + sttrsub
            results.append(sttr)
        return results
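    # For reference, a successful /v1/completions response is JSON shaped roughly like the
    # abridged sketch below (field values are illustrative only):
    # {
    #   "id": "cmpl-...",
    #   "object": "text_completion",
    #   "model": "text-davinci-003",
    #   "choices": [{"text": "...", "index": 0, "logprobs": null, "finish_reason": "stop"}],
    #   "usage": {"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12}
    # }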

        """Completion dialog completions """
    """The input parameter content is the content, and num is the number of acquisitions."""
    """Given a hint, the model returns one or more predicted completions, and may also return the probability of an alternative marker for each position."""
    """
    model: string Required The ID of the model to use. You can see all available models using the List models API, or see our models overview for their descriptions.
    messages array Required: A list of messages describing the conversation so far.
            role string Required The role of the author of this message. One of system, user, or assistant
            content string Required The contents of the message.
            name string Optional The name of the author of this message. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
    max_tokens : integer Optional Defaults to 16 Maximum number of tokens to generate on completion. Your hint's token count plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except the latest model, which supports 4096).
    temperature:number Optional Defaults to 1 What sampling temperature to use, between 0 and 2. Higher values (such as 0.8) will make the output more random, while lower values (such as 0.2) will make the output more focused and deterministic. We generally recommend changing this or top_p but not both.
    top_p:number Optional Defaults to 1 An alternative to temperature sampling, known as kernel sampling, where the model considers results from labels with top_p probability mass. So 0.1 means that only tokens constituting the top 10% of probability masses are considered. We generally recommend changing either this value or the temperature, but not both.
    n:integer Optional Defaults to 1 How many completions to generate for each hint. NOTE: Because this parameter generates a lot of completions, it can drain your token quota very quickly. Use with caution and make sure you have sensible settings for max_tokens and stop.
    stream: boolean Optional Defaults to false Whether to stream back part of the progress. If set, the token will be sent as a data-only server-sent event when available, with the stream terminated by a data: [DONE] message.
    logprobs:integer Optional Defaults to null Include log probabilities on the most likely markers in logprobs, and the chosen marker. For example, if logprobs is 5, the API will return a list of the 5 most likely tokens.
        The API will always return the logprob of the sampled token, so there may be at most logprobs + 1 elements in the response. The maximum value for logprobs is 5. If you need more, please contact us via our help center and describe your use case.
    echo:boolean Optional Defaults to false echoes the prompt in addition to completion
    stop:string or array Optional Defaults to null The API will stop generating up to 4 sequences of more tokens. The returned text will not contain stop sequences.
    presence_penalty:number Optional Defaults to 0 A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they have appeared in the text so far, increasing the likelihood that the model talks about new topics.
    frequency_penalty:number Optional Defaults to 0 A number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text, making the model less likely to repeat the same line verbatim.
    best_of: integer Optional Defaults to 1 Generate best_of on the server side to complete and return the "best" (the one with the highest log probability for each token). Failed to stream results. best_of controls the number of candidate completions when used with n, where n specifies how many to return - best_of must be greater than n.
            NOTE: Because this parameter generates a lot of completions, it can drain your token quota very quickly. Use with caution and make sure you have sensible settings for max_tokens and stop.
    logit_bias:map Optional Defaults to null Modifies the likelihood that the specified marker appears in the completion. Accepts a json object that maps a tag (specified by a tag ID in the GPT tagger) to an associated bias value from -100 to 100.
                You can use this tokenizer tool (for GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the log generated by the model before sampling. The exact effect varies by model,
                But values between -1 and 1 should reduce or increase the probability of selection; values like -100 or 100 should result in forbidden or exclusive selection of the associated token.
                For example, you can pass {"50256": -100} to prevent <|endoftext|> tags from being generated.
    user:string Optional A unique identifier representing your end user that can help OpenAI monitor and detect abuse. learn more.
    """
    def CreateChat(self, model="gpt-3.5-turbo", messages=list(), max_tokens=2000, temperature=1, top_p=1, n=1, stream=False, logprobs=None, echo=False, stop=None, presence_penalty=0,
                  frequency_penalty=0, best_of=1, logit_bias=dict(), user=""):
        models=[]
        baseUrl="https://api.openai.com/v1/chat/completions"
        datap={
          "model": model,
          "messages": messages
        }



        response=requests.post(baseUrl,headers=header,proxies=proxies,data=json.dumps(datap),verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        #print(loads)
        for iter in loads["choices"]:
            sttr=(dict(iter["message"]).get("content"))
            models.append(sttr)
        return models
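    # Minimal usage sketch (assumes the proxy and API key above are valid; the message
    # contents are illustrative only):
    #
    #   opai = openaihttp()
    #   msgs = [
    #       {"role": "system", "content": "You are a helpful assistant."},
    #       {"role": "user", "content": "Hello! Who are you?"}
    #   ]
    #   for reply in opai.CreateChat(model="gpt-3.5-turbo", messages=msgs):
    #       print(reply)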

        """Completion dialog completions """
    """The input parameter content is the content, and num is the number of acquisitions."""
    """Given a hint, the model returns one or more predicted completions, and may also return the probability of an alternative marker for each position."""
    """
    model: string Required The ID of the model to use. You can see all available models using the List models API, or see our models overview for their descriptions.
        ID of the model to use. You can use the text-davinci-edit-001 or code-davinci-edit-001 model with this endpoint.
    input string Optional Defaults to '' The input text to use as a starting point for the edit.
    instruction string Required The instruction that tells the model how to edit the prompt.
    temperature:number Optional Defaults to 1 What sampling temperature to use, between 0 and 2. Higher values (such as 0.8) will make the output more random, while lower values (such as 0.2) will make the output more focused and deterministic. We generally recommend changing this or top_p but not both.
    top_p:number Optional Defaults to 1 An alternative to temperature sampling, known as kernel sampling, where the model considers results from labels with top_p probability mass. So 0.1 means that only tokens constituting the top 10% of probability masses are considered. We generally recommend changing either this value or the temperature, but not both.
    n:integer Optional Defaults to 1 How many completions to generate for each hint. NOTE: Because this parameter generates a lot of completions, it can drain your token quota very quickly. Use with caution and make sure you have sensible settings for max_tokens and stop.
    """
    def CreateEdit(self, input="", instruction="Fix the errors", temperature=1, top_p=1, n=1):
        models=[]
        baseUrl="https://api.openai.com/v1/edits"
        datap={
          "model": "text-davinci-edit-001",
          "input": input,
          "instruction": instruction,
          "temperature": temperature,
          "top_p": top_p,
          "n": n
        }
        response=requests.post(baseUrl,headers=header,proxies=proxies,data=json.dumps(datap),verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        #print(loads)
        for iter in loads["choices"]:
            #sttr=(dict(iter["message"]).get("content"))
            models.append(iter.get("text"))
        return models
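    # Minimal usage sketch (the input and instruction strings are illustrative only):
    #
    #   for edited in opai.CreateEdit(input="What kind of animal is the puppy",
    #                                 instruction="Rewrite the sentence to be about a kitten",
    #                                 temperature=0, n=1):
    #       print(edited)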
        """Completion dialog completions """
    """The input parameter content is the content, and num is the number of acquisitions."""
    """Create image"""
    """
    prompt string Required A text description of the desired image(s). The maximum length is 1000 characters
    n integer Optional Defaults to 1 The number of images to generate. Must be between 1 and 10.
    size string Optional Defaults to 1024x1024 The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
    response_format string Optional Defaults to url The format in which the generated images are returned. Must be one of url or b64_json.
    user string Optional A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
    """
    def CreateImage(self, prompt="randomly generate a picture", n=1, size="1024x1024", response_format="url", user=""):
        models=[]
        baseUrl="https://api.openai.com/v1/images/generations"
        datap={
          "prompt": prompt,
          "n": n,
          "size": size,
          "response_format": response_format,
          "user": user
        }

        response=requests.post(baseUrl,headers=header,proxies=proxies,data=json.dumps(datap),verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        #print(loads)
        for iter in loads["data"]:
            models.append(iter.get("url"))
        return models

    """Edit Image"""
    """
    image string Required The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency,
        which will be used as the mask.
    mask string Optional An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited.
        Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
    prompt string Required A text description of the desired image(s). The maximum length is 1000 characters.
    n integer Optional Defaults to 1 The number of images to generate. Must be between 1 and 10.
    size string Optional Defaults to 1024x1024 The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
    response_format string Optional Defaults to url The format in which the generated images are returned. Must be one of url or b64_json.
    user string Optional A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
    """
    def CreateImageEdit(self, prompt="randomly generate a picture", image="", mask="", n=1, size="1024x1024", response_format="url", user=""):
        models=[]
        # The image edit endpoint expects a multipart/form-data upload rather than JSON,
        # so the JSON Content-Type header is dropped and the PNG files are sent as files.
        baseUrl="https://api.openai.com/v1/images/edits"
        headerNoJson={k: v for k, v in header.items() if k != "Content-Type"}
        datap={
          "prompt": prompt,
          "n": n,
          "size": size,
          "response_format": response_format,
          "user": user
        }
        files={"image": image}  # image should be a PNG file object opened in binary mode ("rb")
        if mask:
            files["mask"] = mask
        response=requests.post(baseUrl, headers=headerNoJson, proxies=proxies, data=datap, files=files, verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        #print(loads)
        for iter in loads["data"]:
            models.append(iter.get("url"))
        return models
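    # Minimal usage sketch (the PNG path is illustrative; the file must be opened in
    # binary mode, be square, and be smaller than 4MB):
    #
    #   with open("input.png", "rb") as img:
    #       for url in opai.CreateImageEdit(prompt="Change the background to red", image=img, n=1):
    #           print(url)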

    """Image deformation"""
    """
    image string Required The image to edit. Must be a valid PNG file, less than 4MB, and square.
    n integer Optional Defaults to 1 The number of images to generate. Must be between 1 and 10.
    size string Optional Defaults to 1024x1024 The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
    response_format string Optional Defaults to url The format in which the generated images are returned. Must be one of url or b64_json.
    user string Optional A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
    """
    def CreateImageVariation(self, image="", n=1, size="1024x1024", response_format="url", user=""):
        models=[]
        # The image variation endpoint also expects a multipart/form-data upload
        # (and takes no prompt), so the PNG is sent as a file and the JSON header is dropped.
        baseUrl="https://api.openai.com/v1/images/variations"
        headerNoJson={k: v for k, v in header.items() if k != "Content-Type"}
        datap={
          "n": n,
          "size": size,
          "response_format": response_format,
          "user": user
        }
        files={"image": image}  # image should be a PNG file object opened in binary mode ("rb")
        response=requests.post(baseUrl, headers=headerNoJson, proxies=proxies, data=datap, files=files, verify=False)
        loads = json.loads(response.content.decode('utf-8'))
        #print(loads)
        for iter in loads["data"]:
            models.append(iter.get("url"))
        return models
        

if __name__=="__main__":
    opai=openaihttp()
    # Note: this script only depends on the requests package (no openai SDK required).
    # Test: list all model names
    #for iter in opai.ListModels():
    #    print(iter)
    # Test: get model details
    #print(opai.RetrieveModel("text-davinci-003"))
    # Test: completion
    #print(opai.CreateCompletion(prompt="What kind of animal is a puppy?", n=2))
    # Test: completion with a suffix
    #for iter in opai.CreateCompletion(prompt="Write a 500-word short essay that begins: Xiao Ming goes to school in the morning", suffix="In the end, Xiao Ming was sent to the hospital", n=1):
    #    print(iter)
    # Test: edit
    #for iter in opai.CreateEdit(input="What kind of animal is the puppy", instruction="Change the puppy into a kitten, listing all species", temperature=0, n=1):
    #    print(iter)
    # Test: generate images
    #for iter in opai.CreateImage(prompt="Draw a remote sensing satellite image with a deep space background", n=2):
    #    print(iter)
    # Test: edit an image
    #with open("../statics/images/IMG_0273.png", "rb") as image:
    #    for iter in opai.CreateImageEdit(prompt="Change the background to red", image=image, n=2):
    #        print(iter)
    # Test: image variation
    #with open("../statics/images/IMG_0273.png", "rb") as image:
    #    for iter in opai.CreateImageVariation(image=image, n=2):
    #        print(iter)