Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 16 additions & 7 deletions Core/GameEngine/Source/Common/System/AsciiString.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
#include "PreRTS.h" // This must go first in EVERY cpp file in the GameEngine

#include "Common/CriticalSection.h"
#include "utf8.h"


// -----------------------------------------------------
Expand Down Expand Up @@ -137,8 +138,8 @@ void AsciiString::ensureUniqueBufferOfSize(int numCharsNeeded, Bool preserveData
// TheSuperHackers @fix Mauller 04/04/2025 Replace strcpy with safer memmove as memory regions can overlap when part of string is copied to itself
DEBUG_ASSERTCRASH(usableNumChars <= strlen(strToCopy), ("strToCopy is too small"));
memmove(m_data->peek(), strToCopy, usableNumChars);
m_data->peek()[usableNumChars] = 0;
}
m_data->peek()[usableNumChars] = 0;
if (strToCat)
strcat(m_data->peek(), strToCat);
return;
Expand Down Expand Up @@ -166,8 +167,8 @@ void AsciiString::ensureUniqueBufferOfSize(int numCharsNeeded, Bool preserveData
{
DEBUG_ASSERTCRASH(usableNumChars <= strlen(strToCopy), ("strToCopy is too small"));
strncpy(newData->peek(), strToCopy, usableNumChars);
newData->peek()[usableNumChars] = 0;
}
newData->peek()[usableNumChars] = 0;
if (strToCat)
strcat(newData->peek(), strToCat);

Expand Down Expand Up @@ -272,11 +273,19 @@ char* AsciiString::getBufferForRead(Int len)
void AsciiString::translate(const UnicodeString& stringSrc)
{
validate();
/// @todo srj put in a real translation here; this will only work for 7-bit ascii
clear();
Int len = stringSrc.getLength();
for (Int i = 0; i < len; i++)
concat((char)stringSrc.getCharAt(i));
// TheSuperHackers @fix bobtista 02/04/2026 Implement UTF-8 conversion replacing 7-bit ASCII only implementation
const WideChar* src = stringSrc.str();
size_t srcLen = wcslen(src);
size_t len = Utf16Le_To_Utf8_Len(src, srcLen);
if (len == 0)
{
clear();
return;
}
ensureUniqueBufferOfSize((Int)len + 1, false, nullptr, nullptr);
char* buf = peek();
if (Utf16Le_To_Utf8(buf, len + 1, src, srcLen, true) == 0)
clear();
validate();
}

Expand Down
23 changes: 16 additions & 7 deletions Core/GameEngine/Source/Common/System/UnicodeString.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
#include "PreRTS.h" // This must go first in EVERY cpp file in the GameEngine

#include "Common/CriticalSection.h"
#include "utf8.h"


// -----------------------------------------------------
Expand Down Expand Up @@ -88,8 +89,8 @@ void UnicodeString::ensureUniqueBufferOfSize(int numCharsNeeded, Bool preserveDa
// TheSuperHackers @fix Mauller 04/04/2025 Replace wcscpy with safer memmove as memory regions can overlap when part of string is copied to itself
DEBUG_ASSERTCRASH(usableNumChars <= wcslen(strToCopy), ("strToCopy is too small"));
memmove(m_data->peek(), strToCopy, usableNumChars * sizeof(WideChar));
m_data->peek()[usableNumChars] = 0;
}
m_data->peek()[usableNumChars] = 0;
if (strToCat)
wcscat(m_data->peek(), strToCat);
return;
Expand Down Expand Up @@ -117,8 +118,8 @@ void UnicodeString::ensureUniqueBufferOfSize(int numCharsNeeded, Bool preserveDa
{
DEBUG_ASSERTCRASH(usableNumChars <= wcslen(strToCopy), ("strToCopy is too small"));
wcsncpy(newData->peek(), strToCopy, usableNumChars);
newData->peek()[usableNumChars] = 0;
}
newData->peek()[usableNumChars] = 0;
if (strToCat)
wcscat(newData->peek(), strToCat);

Expand Down Expand Up @@ -221,11 +222,19 @@ WideChar* UnicodeString::getBufferForRead(Int len)
void UnicodeString::translate(const AsciiString& stringSrc)
{
validate();
/// @todo srj put in a real translation here; this will only work for 7-bit ascii
clear();
Int len = stringSrc.getLength();
for (Int i = 0; i < len; i++)
concat((WideChar)stringSrc.getCharAt(i));
// TheSuperHackers @fix bobtista 02/04/2026 Implement UTF-8 conversion replacing 7-bit ASCII only implementation
const char* src = stringSrc.str();
size_t srcLen = strlen(src);
size_t len = Utf8_To_Utf16Le_Len(src, srcLen);
if (len == 0)
{
clear();
return;
}
ensureUniqueBufferOfSize((Int)len + 1, false, nullptr, nullptr);
WideChar* buf = peek();
if (Utf8_To_Utf16Le(buf, len + 1, src, srcLen, true) == 0)
clear();
validate();
}

Expand Down
36 changes: 18 additions & 18 deletions Core/GameEngine/Source/GameNetwork/GameSpy/Thread/ThreadUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,18 +28,24 @@

#include "PreRTS.h" // This must go first in EVERY cpp file in the GameEngine

#include "utf8.h"

//-------------------------------------------------------------------------

// TheSuperHackers @refactor bobtista 02/04/2026 Use WWLib UTF-8 functions instead of raw Win32 API calls
std::wstring MultiByteToWideCharSingleLine( const char *orig )
{
Int len = strlen(orig);
WideChar *dest = NEW WideChar[len+1];

MultiByteToWideChar(CP_UTF8, 0, orig, -1, dest, len);
size_t srcLen = strlen(orig);
size_t len = Utf8_To_Utf16Le_Len(orig, srcLen);
if (len == 0)
return std::wstring();
std::wstring ret;
ret.resize(len);
Utf8_To_Utf16Le(&ret[0], len, orig, srcLen, true);
WideChar *c = nullptr;
do
{
c = wcschr(dest, L'\n');
c = wcschr(&ret[0], L'\n');
if (c)
{
*c = L' ';
Expand All @@ -48,32 +54,26 @@ std::wstring MultiByteToWideCharSingleLine( const char *orig )
while ( c != nullptr );
do
{
c = wcschr(dest, L'\r');
c = wcschr(&ret[0], L'\r');
if (c)
{
*c = L' ';
}
}
while ( c != nullptr );

dest[len] = 0;
std::wstring ret = dest;
delete[] dest;
return ret;
}

std::string WideCharStringToMultiByte( const WideChar *orig )
{
size_t srcLen = wcslen(orig);
size_t len = Utf16Le_To_Utf8_Len(orig, srcLen);
if (len == 0)
return std::string();
std::string ret;
Int len = WideCharToMultiByte( CP_UTF8, 0, orig, wcslen(orig), nullptr, 0, nullptr, nullptr ) + 1;
if (len > 0)
{
char *dest = NEW char[len];
WideCharToMultiByte( CP_UTF8, 0, orig, -1, dest, len, nullptr, nullptr );
dest[len-1] = 0;
ret = dest;
delete[] dest;
}
ret.resize(len);
Utf16Le_To_Utf8(&ret[0], len, orig, srcLen, true);
return ret;
}

Expand Down
2 changes: 2 additions & 0 deletions Core/Libraries/Source/WWVegas/WWLib/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,8 @@ set(WWLIB_SRC
trim.cpp
trim.h
uarray.h
utf8.cpp
utf8.h
Comment on lines +136 to +137
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 utf8.cpp unconditionally listed outside the if(WIN32) block

utf8.cpp's entire implementation is wrapped in #ifdef _WIN32 … #else #error "Not implemented" #endif, and the callers (AsciiString.cpp, UnicodeString.cpp, ThreadUtils.cpp) now unconditionally include utf8.h and call its functions. Any non-Windows build will hit the #error immediately. Other Win32-specific sources (registry.cpp, verchk.cpp, etc.) are correctly placed inside the if(WIN32) block below — utf8.cpp should follow the same pattern.

# Remove from unconditional WWLIB_SRC and add to the existing if(WIN32) block:
if(WIN32)
    list(APPEND WWLIB_SRC
        ...
        utf8.cpp
        utf8.h
        ...
    )
endif()

A proper cross-platform implementation (or #ifdef guards in the callers) would be needed before removing the #error.

Prompt To Fix With AI
This is a comment left during a code review.
Path: Core/Libraries/Source/WWVegas/WWLib/CMakeLists.txt
Line: 136-137

Comment:
**`utf8.cpp` unconditionally listed outside the `if(WIN32)` block**

`utf8.cpp`'s entire implementation is wrapped in `#ifdef _WIN32 … #else #error "Not implemented" #endif`, and the callers (`AsciiString.cpp`, `UnicodeString.cpp`, `ThreadUtils.cpp`) now unconditionally include `utf8.h` and call its functions. Any non-Windows build will hit the `#error` immediately. Other Win32-specific sources (`registry.cpp`, `verchk.cpp`, etc.) are correctly placed inside the `if(WIN32)` block below — `utf8.cpp` should follow the same pattern.

```cmake
# Remove from unconditional WWLIB_SRC and add to the existing if(WIN32) block:
if(WIN32)
    list(APPEND WWLIB_SRC
        ...
        utf8.cpp
        utf8.h
        ...
    )
endif()
```

A proper cross-platform implementation (or `#ifdef` guards in the callers) would be needed before removing the `#error`.

How can I resolve this? If you propose a fix, please make it concise.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think we need to change this - cross-platform implementation is a todo and the #error is intentional as a placeholder

vector.cpp
Vector.h
visualc.h
Expand Down
174 changes: 174 additions & 0 deletions Core/Libraries/Source/WWVegas/WWLib/utf8.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
/*
** Command & Conquer Generals Zero Hour(tm)
** Copyright 2026 TheSuperHackers
**
** This program is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "always.h"
#include "utf8.h"

#include <string.h>

#ifdef _WIN32
#include <windows.h>

static bool Is_Trail_Byte(char c)
{
return (c & 0xC0) == 0x80;
}

size_t Utf8_Num_Bytes(char lead)
{
if ((lead & 0x80) == 0x00) return 1;
if ((lead & 0xE0) == 0xC0) return 2;
if ((lead & 0xF0) == 0xE0) return 3;
if ((lead & 0xF8) == 0xF0) return 4;
return 0;
}

size_t Utf8_Trailing_Invalid_Bytes(const char* str, size_t length)
{
if (length == 0)
return 0;

size_t i = length;
while (i > 0 && Is_Trail_Byte(str[i - 1]))
--i;

if (i == 0)
return length;

size_t claimed = Utf8_Num_Bytes(str[i - 1]);
size_t actual = length - (i - 1);

if (claimed == 0 || claimed != actual)
return actual;

return 0;
}

bool Utf8_Validate(const char* str)
{
return Utf8_Validate(str, strlen(str));
}

bool Utf8_Validate(const char* str, size_t length)
{
const unsigned char* s = (const unsigned char*)str;
size_t i = 0;
while (i < length)
{
size_t bytes = Utf8_Num_Bytes(str[i]);
if (bytes == 0)
return false;
if (i + bytes > length)
return false;
for (size_t j = 1; j < bytes; ++j)
{
if (!Is_Trail_Byte(str[i + j]))
return false;
}
// Reject overlong encodings per RFC 3629
if (bytes == 2 && s[i] < 0xC2)
return false;
if (bytes == 3 && s[i] == 0xE0 && s[i + 1] < 0xA0)
return false;
if (bytes == 4 && s[i] == 0xF0 && s[i + 1] < 0x90)
return false;
// Reject codepoints above U+10FFFF
if (bytes == 4 && s[i] > 0xF4)
return false;
if (bytes == 4 && s[i] == 0xF4 && s[i + 1] > 0x8F)
return false;
Comment on lines +84 to +94
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Missing surrogate rejection breaks RFC 3629 compliance

Utf8_Validate accepts UTF-16 surrogate codepoints (U+D800–U+DFFF) encoded as \xED\xA0\x80\xED\xBF\xBF. The \xED lead byte only triggers the 3-byte overlong check for s[i] == 0xE0, not for 0xED, so every surrogate sequence passes. RFC 3629 §4 defines the valid range for 3-byte sequences as %xED %x80-9F UTF8-tail — second byte 0xA0–0xBF is explicitly excluded. Since Utf8_Validate is called to gate player-name input, malformed surrogate sequences will slip through.

Add the missing guard alongside the existing 3-byte check:

Suggested change
if (bytes == 2 && s[i] < 0xC2)
return false;
if (bytes == 3 && s[i] == 0xE0 && s[i + 1] < 0xA0)
return false;
if (bytes == 4 && s[i] == 0xF0 && s[i + 1] < 0x90)
return false;
// Reject codepoints above U+10FFFF
if (bytes == 4 && s[i] > 0xF4)
return false;
if (bytes == 4 && s[i] == 0xF4 && s[i + 1] > 0x8F)
return false;
// Reject overlong encodings per RFC 3629
if (bytes == 2 && s[i] < 0xC2)
return false;
if (bytes == 3 && s[i] == 0xE0 && s[i + 1] < 0xA0)
return false;
if (bytes == 3 && s[i] == 0xED && s[i + 1] > 0x9F)
return false;
if (bytes == 4 && s[i] == 0xF0 && s[i + 1] < 0x90)
return false;
// Reject codepoints above U+10FFFF
if (bytes == 4 && s[i] > 0xF4)
return false;
if (bytes == 4 && s[i] == 0xF4 && s[i + 1] > 0x8F)
return false;
Prompt To Fix With AI
This is a comment left during a code review.
Path: Core/Libraries/Source/WWVegas/WWLib/utf8.cpp
Line: 84-94

Comment:
**Missing surrogate rejection breaks RFC 3629 compliance**

`Utf8_Validate` accepts UTF-16 surrogate codepoints (U+D800–U+DFFF) encoded as `\xED\xA0\x80``\xED\xBF\xBF`. The `\xED` lead byte only triggers the 3-byte overlong check for `s[i] == 0xE0`, not for `0xED`, so every surrogate sequence passes. RFC 3629 §4 defines the valid range for 3-byte sequences as `%xED %x80-9F UTF8-tail` — second byte `0xA0–0xBF` is explicitly excluded. Since `Utf8_Validate` is called to gate player-name input, malformed surrogate sequences will slip through.

Add the missing guard alongside the existing 3-byte check:

```suggestion
		// Reject overlong encodings per RFC 3629
		if (bytes == 2 && s[i] < 0xC2)
			return false;
		if (bytes == 3 && s[i] == 0xE0 && s[i + 1] < 0xA0)
			return false;
		if (bytes == 3 && s[i] == 0xED && s[i + 1] > 0x9F)
			return false;
		if (bytes == 4 && s[i] == 0xF0 && s[i + 1] < 0x90)
			return false;
		// Reject codepoints above U+10FFFF
		if (bytes == 4 && s[i] > 0xF4)
			return false;
		if (bytes == 4 && s[i] == 0xF4 && s[i + 1] > 0x8F)
			return false;
```

How can I resolve this? If you propose a fix, please make it concise.

i += bytes;
}
return true;
}
Comment thread
bobtista marked this conversation as resolved.

// Queries the number of UTF-8 bytes needed to hold the converted string,
// excluding any NUL terminator. Returns 0 when nothing would be converted
// (empty input or conversion failure).
size_t Utf16Le_To_Utf8_Len(const wchar_t* src, size_t srcLen)
{
	// A null destination asks WideCharToMultiByte for the required size only.
	const int byteCount = WideCharToMultiByte(CP_UTF8, 0, src, (int)srcLen, nullptr, 0, nullptr, nullptr);
	if (byteCount <= 0)
		return 0;
	return (size_t)byteCount;
}

// Queries the number of UTF-16 code units needed to hold the converted
// string, excluding any NUL terminator. Returns 0 when nothing would be
// converted (empty input or conversion failure).
size_t Utf8_To_Utf16Le_Len(const char* src, size_t srcLen)
{
	// A null destination asks MultiByteToWideChar for the required size only.
	const int unitCount = MultiByteToWideChar(CP_UTF8, 0, src, (int)srcLen, nullptr, 0);
	if (unitCount <= 0)
		return 0;
	return (size_t)unitCount;
}

// Converts UTF-16LE to UTF-8 into 'dest'. When 'writeDirect' is false, the
// destination size is verified first: on a too-small buffer the required byte
// count is returned without converting. Returns the number of bytes written
// (excluding the terminator), or 0 when nothing was converted.
size_t Utf16Le_To_Utf8(char* dest, size_t destLen, const wchar_t* src, size_t srcLen, bool writeDirect)
{
	if (!writeDirect)
	{
		const size_t needed = Utf16Le_To_Utf8_Len(src, srcLen);
		if (needed == 0 || needed > destLen)
		{
			// Leave an empty string behind when there is room for one.
			if (destLen > 0)
				dest[0] = '\0';
			// 0 means nothing to convert; a non-zero value reports the
			// required destination size.
			return (needed > destLen) ? needed : 0;
		}
	}
	const int byteCount = WideCharToMultiByte(CP_UTF8, 0, src, (int)srcLen, dest, (int)destLen, nullptr, nullptr);
	if (byteCount <= 0)
	{
		if (destLen > 0)
			dest[0] = '\0';
		return 0;
	}
	// NUL-terminate only when the buffer has spare room; a conversion that
	// exactly fills the buffer is returned unterminated.
	if ((size_t)byteCount < destLen)
		dest[byteCount] = '\0';
	return (size_t)byteCount;
}

// Converts UTF-8 to UTF-16LE into 'dest'. When 'writeDirect' is false, the
// destination size is verified first: on a too-small buffer the required
// code-unit count is returned without converting. Returns the number of code
// units written (excluding the terminator), or 0 when nothing was converted.
size_t Utf8_To_Utf16Le(wchar_t* dest, size_t destLen, const char* src, size_t srcLen, bool writeDirect)
{
	if (!writeDirect)
	{
		const size_t needed = Utf8_To_Utf16Le_Len(src, srcLen);
		if (needed == 0 || needed > destLen)
		{
			// Leave an empty string behind when there is room for one.
			if (destLen > 0)
				dest[0] = L'\0';
			// 0 means nothing to convert; a non-zero value reports the
			// required destination size.
			return (needed > destLen) ? needed : 0;
		}
	}
	const int unitCount = MultiByteToWideChar(CP_UTF8, 0, src, (int)srcLen, dest, (int)destLen);
	if (unitCount <= 0)
	{
		if (destLen > 0)
			dest[0] = L'\0';
		return 0;
	}
	// NUL-terminate only when the buffer has spare room; a conversion that
	// exactly fills the buffer is returned unterminated.
	if ((size_t)unitCount < destLen)
		dest[unitCount] = L'\0';
	return (size_t)unitCount;
}

#else
#error "Not implemented"
#endif
Loading
Loading