Commit 218ec46
Merge main
XInTheDark committed Aug 27, 2024
2 parents 555c4a2 + 3b570bf commit 218ec46
Showing 10 changed files with 144 additions and 85 deletions.
5 changes: 1 addition & 4 deletions README.md
@@ -1,8 +1,6 @@
 # THIS IS A FORK OF THE OFFICIAL GPT4FREE REPOSITORY. This project is for educational purposes only, and must not be used for any other purpose.
 <a href="https://trendshift.io/repositories/1692" target="_blank"><img src="https://trendshift.io/api/badge/repositories/1692" alt="xtekky%2Fgpt4free | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
 
-The **ETA** till (v3 for g4f) where I, [@xtekky](https://github.com/xtekky) will pick this project back up and improve it is **`29` days** (written Tue 28 May), join [t.me/g4f_channel](https://t.me/g4f_channel) in the meanwhile to stay updated.
-
 ---
 
 Written by [@xtekky](https://github.com/xtekky) & maintained by [@hlohaus](https://github.com/hlohaus)
@@ -357,8 +355,7 @@ While we wait for gpt-5, here is a list of new models that are at least better t
 | [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ || ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) ||
 | [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ || ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) ||
 | [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ || ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) ||
-| [f1.cnote.top](https://f1.cnote.top) | `g4f.Provider.Cnote` | ✔️ || ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) ||
-| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DuckDuckGo` | ✔️ || ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) ||
+| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DDG` | ✔️ || ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) ||
 | [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔️ || ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) ||
 | [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔️ || ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) ||
 | [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔️ || ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) ||
10 changes: 8 additions & 2 deletions g4f/Provider/Chatgpt4Online.py
@@ -13,7 +13,12 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
     working = True
     supports_gpt_4 = True
 
+    async def get_nonce():
+        async with ClientSession() as session:
+            async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
+                return (await response.json())["restNonce"]
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -37,7 +42,7 @@ async def create_async_generator(
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            "x-wp-nonce": "d9505e9877",
+            "x-wp-nonce": await cls.get_nonce(),
         }
 
         async with ClientSession(headers=headers) as session:
@@ -69,3 +74,4 @@ async def create_async_generator(
                     continue
 
             yield full_response
+
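Note: the hardcoded `x-wp-nonce` broke whenever WordPress rotated the nonce; fetching it from `start_session` at request time keeps the header valid. A minimal standalone sketch (not part of the commit) to check the endpoint, using only the URL and `restNonce` key shown in the diff above:

```python
import asyncio
from aiohttp import ClientSession

async def fetch_nonce() -> str:
    # Same endpoint and JSON key as Chatgpt4Online.get_nonce above.
    async with ClientSession() as session:
        async with session.post("https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
            response.raise_for_status()
            return (await response.json())["restNonce"]

if __name__ == "__main__":
    print(asyncio.run(fetch_nonce()))
```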
5 changes: 2 additions & 3 deletions g4f/Provider/DDG.py
@@ -18,14 +18,13 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     supports_gpt_35_turbo = True
     supports_message_history = True
 
-    # default_model = "gpt-3.5-turbo-0125"
     default_model = "gpt-4o-mini"
-    models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Llama-3-70b-chat-hf", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
+    models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
     model_aliases = {
         "gpt-4": "gpt-4o-mini",
         "gpt-4o": "gpt-4o-mini",
         "claude-3-haiku": "claude-3-haiku-20240307",
-        "llama-3-70b": "meta-llama/Llama-3-70b-chat-hf",
+        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
     }
 
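Note: DDG swaps `meta-llama/Llama-3-70b-chat-hf` for `meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`, so the old `llama-3-70b` alias is gone. A usage sketch, assuming g4f's standard `ChatCompletion.create` interface:

```python
import g4f
from g4f.Provider import DDG

# "llama-3.1-70b" resolves through model_aliases to the new Turbo checkpoint.
response = g4f.ChatCompletion.create(
    model="llama-3.1-70b",
    provider=DDG,
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response)
```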
4 changes: 2 additions & 2 deletions g4f/Provider/FlowGpt.py
@@ -30,7 +30,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
         "pygmalion-13b",
         "chronos-hermes-13b",
         "Mixtral-8x7B",
-        "Dolphin-2.6-8x7B"
+        "Dolphin-2.6-8x7B",
     ]
     model_aliases = {
         "gemini": "google-gemini",
@@ -91,7 +91,7 @@ async def create_async_generator(
             "generateImage": False,
             "generateAudio": False
         }
-        async with session.post("https://backend-k8s.flowgpt.com/v2/chat-anonymous-encrypted", json=data, proxy=proxy) as response:
+        async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
             await raise_for_status(response)
             async for chunk in response.content:
                 if chunk.strip():
127 changes: 57 additions & 70 deletions g4f/Provider/HuggingChat.py
@@ -33,80 +33,78 @@ def create_completion(
         model: str,
         messages: Messages,
         stream: bool,
-        **kwargs
-    ) -> CreateResult:
+        **kwargs) -> CreateResult:
 
         if (model in cls.models) :
 
-            session = requests.Session()
-            headers = {
-                'accept' : '*/*',
-                'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-                'cache-control' : 'no-cache',
-                'origin' : 'https://huggingface.co',
-                'pragma' : 'no-cache',
-                'priority' : 'u=1, i',
-                'referer' : 'https://huggingface.co/chat/',
-                'sec-ch-ua' : '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
-                'sec-ch-ua-mobile' : '?0',
+            session = cf_reqs.Session()
+            session.headers = {
+                'accept': '*/*',
+                'accept-language': 'en',
+                'cache-control': 'no-cache',
+                'origin': 'https://huggingface.co',
+                'pragma': 'no-cache',
+                'priority': 'u=1, i',
+                'referer': 'https://huggingface.co/chat/',
+                'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                'sec-ch-ua-mobile': '?0',
                 'sec-ch-ua-platform': '"macOS"',
-                'sec-fetch-dest' : 'empty',
-                'sec-fetch-mode' : 'cors',
-                'sec-fetch-site' : 'same-origin',
-                'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
 
+            print(model)
             json_data = {
-                'searchEnabled' : True,
-                'activeModel' : 'CohereForAI/c4ai-command-r-plus', # doesn't matter
-                'hideEmojiOnSidebar': False,
-                'customPrompts' : {},
-                'assistants' : [],
-                'tools' : {},
-                'disableStream' : False,
-                'recentlySaved' : False,
-                'ethicsModalAccepted' : True,
-                'ethicsModalAcceptedAt' : None,
-                'shareConversationsWithModelAuthors': False,
+                'model': model,
             }
 
-            response = cf_reqs.post('https://huggingface.co/chat/settings', headers=headers, json=json_data)
-            session.cookies.update(response.cookies)
-
-            response = session.post('https://huggingface.co/chat/conversation',
-                headers=headers, json={'model': model})
-
+            response = session.post('https://huggingface.co/chat/conversation', json=json_data)
             conversationId = response.json()['conversationId']
-            response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11',
-                headers=headers,
-            )
-
-            messageId = extract_id(response.json())
+            response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
+
+            data: list = (response.json())["nodes"][1]["data"]
+            keys: list[int] = data[data[0]["messages"]]
+            message_keys: dict = data[keys[0]]
+            messageId: str = data[message_keys["id"]]
 
             settings = {
-                "inputs" : format_prompt(messages),
-                "id" : messageId,
-                "is_retry" : False,
-                "is_continue" : False,
-                "web_search" : False,
-
-                # TODO // add feature to enable/disable tools
-                "tools": {
-                    "websearch" : True,
-                    "document_parser" : False,
-                    "query_calculator" : False,
-                    "image_generation" : False,
-                    "image_editing" : False,
-                    "fetch_url" : False,
-                }
+                "inputs":format_prompt(messages),
+                "id":messageId,
+                "is_retry":False,
+                "is_continue":False,
+                "web_search":False,
+                "tools":[]
             }
 
-            payload = {
-                "data": json.dumps(settings),
+            headers = {
+                'accept': '*/*',
+                'accept-language': 'en',
+                'cache-control': 'no-cache',
+                'origin': 'https://huggingface.co',
+                'pragma': 'no-cache',
+                'priority': 'u=1, i',
+                'referer': f'https://huggingface.co/chat/conversation/{conversationId}',
+                'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
 
-            response = session.post(f"https://huggingface.co/chat/conversation/{conversationId}",
-                headers=headers, data=payload, stream=True,
+            files = {
+                'data': (None, json.dumps(settings, separators=(',', ':'))),
+            }
+
+            response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}',
+                cookies=session.cookies,
+                headers=headers,
+                files=files,
             )
 
             first_token = True
@@ -125,18 +123,7 @@ def create_completion(
                 else:
                     token = token.replace('\u0000', '')
 
-                    yield token
+                    yield (token)
 
             elif line["type"] == "finalAnswer":
                 break
-
-def extract_id(response: dict) -> str:
-    data = response["nodes"][1]["data"]
-    uuid_pattern = re.compile(
-        r"^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$"
-    )
-    for item in data:
-        if type(item) == str and uuid_pattern.match(item):
-            return item
-
-    return None
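Note: the removed `extract_id` regex-scanned the response for any UUID; the replacement instead walks SvelteKit's `__data.json` encoding, where all nested values live in one flat `data` array and objects store integer indices into that array. A mock illustration (the payload below is invented; the index-chasing mirrors the four added lines):

```python
# Hypothetical miniature of a SvelteKit __data.json node.
response_json = {
    "nodes": [
        None,
        {"data": [
            {"messages": 1},                         # data[0]: root object; "messages" points to index 1
            [2],                                     # data[1]: list of message indices
            {"id": 3},                               # data[2]: first message; "id" points to index 3
            "123e4567-e89b-42d3-a456-426614174000",  # data[3]: the message id itself
        ]},
    ]
}

data = response_json["nodes"][1]["data"]
keys = data[data[0]["messages"]]       # -> [2]
message_keys = data[keys[0]]           # -> {"id": 3}
messageId = data[message_keys["id"]]   # -> the UUID string
print(messageId)
```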
3 changes: 2 additions & 1 deletion g4f/Provider/MagickPenAsk.py
@@ -36,7 +36,8 @@ async def create_async_generator(
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+            'X-API-Secret': 'WCASR6ZQJYM85DVDX7'
         }
         async with ClientSession(headers=headers) as session:
             data = {
15 changes: 12 additions & 3 deletions g4f/Provider/PerplexityLabs.py
@@ -11,11 +11,20 @@
 WS_URL = "wss://www.perplexity.ai/socket.io/"
 
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://labs.perplexity.ai"
+    url = "https://labs.perplexity.ai"
     working = True
     default_model = "mixtral-8x7b-instruct"
     models = [
-        "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat", "llama-3-8b-instruct", "llama-3-70b-instruct", "gemma-2-9b-it", "gemma-2-27b-it", "nemotron-4-340b-instruct", "mixtral-8x7b-instruct",
+        "llama-3.1-sonar-large-128k-online",
+        "llama-3.1-sonar-small-128k-online",
+        "llama-3.1-sonar-large-128k-chat",
+        "llama-3.1-sonar-small-128k-chat",
+        "llama-3.1-8b-instruct",
+        "llama-3.1-70b-instruct",
+        "gemma-2-9b-it",
+        "gemma-2-27b-it",
+        "nemotron-4-340b-instruct",
+        "mixtral-8x7b-instruct"
     ]
 
     @classmethod
@@ -54,7 +63,7 @@ async def create_async_generator(
             data=post_data
         ) as response:
             await raise_for_status(response)
-            assert await response.text() == "OK"
+            assert await response.text() == "OK"
         async with session.ws_connect(f"{WS_URL}?EIO=4&transport=websocket&sid={sid}", autoping=False) as ws:
             await ws.send_str("2probe")
             assert(await ws.receive_str() == "3probe")
56 changes: 56 additions & 0 deletions g4f/Provider/Rocks.py
@@ -0,0 +1,56 @@
import json
from aiohttp import ClientSession

from ..typing import Messages, AsyncResult
from .base_provider import AsyncGeneratorProvider

class Rocks(AsyncGeneratorProvider):
    url = "https://api.discord.rocks"
    api_endpoint = "/chat/completions"
    supports_message_history = False
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        payload = {"messages":messages,"model":model,"max_tokens":4096,"temperature":1,"top_p":1,"stream":True}

        headers = {
            "Accept": "application/json",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9",
            "Origin": cls.url,
            "Referer": f"{cls.url}/en",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        }

        async with ClientSession() as session:
            async with session.post(
                f"{cls.url}{cls.api_endpoint}",
                json=payload,
                proxy=proxy,
                headers=headers
            ) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: "):
                        try:
                            line = json.loads(line[6:])
                        except:
                            continue
                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk
                    elif line.startswith(b"\n"):
                        pass
                    else:
                        raise Exception(f"Unexpected line: {line}")
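Note: a usage sketch for the new provider, driving the async generator directly rather than going through g4f's higher-level client:

```python
import asyncio
from g4f.Provider import Rocks

async def main():
    # create_async_generator is an async generator; iterate it for streamed chunks.
    async for chunk in Rocks.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello!"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())
```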
1 change: 1 addition & 0 deletions g4f/Provider/__init__.py
@@ -54,6 +54,7 @@
 from .Reka import Reka
 from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
+from .Rocks import Rocks
 from .TeachAnything import TeachAnything
 from .Vercel import Vercel
 from .WhiteRabbitNeo import WhiteRabbitNeo
3 changes: 3 additions & 0 deletions g4f/models.py
@@ -504,6 +504,9 @@ class ModelUtils:
         'llama-3-70b-instruct': llama_3_70b_instruct,
         'llama-3-70b-chat': llama_3_70b_chat_hf,
         'llama-3-70b-instruct': llama_3_70b_instruct,
+
+        'llama-3.1-70b': llama_3_1_70b_instruct,
+        'llama-3.1-405b': llama_3_1_405b_instruct_FP8,
         'llama-3.1-70b-instruct': llama_3_1_70b_instruct,
         'llama-3.1-405b-instruct': llama_3_1_405b_instruct_FP8,
 
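Note: assuming the mapping above is `ModelUtils.convert`, as in upstream g4f, the new short aliases and the existing `-instruct` names now resolve to the same model objects:

```python
from g4f.models import ModelUtils

# Both spellings point at the same Model instance.
assert ModelUtils.convert["llama-3.1-70b"] is ModelUtils.convert["llama-3.1-70b-instruct"]
assert ModelUtils.convert["llama-3.1-405b"] is ModelUtils.convert["llama-3.1-405b-instruct"]
```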
