Once again I'm confused. Take a look at this code.

/// Display an array of wchar_t values as zero-padded hex words,
/// 8 values per line.
/// @param pwc  array of wide characters to dump
/// @param nsz  number of elements of pwc to print
void printwbytes(wchar_t * pwc, int nsz)
{
	for (int i = 0; i < nsz; i++) {
		// Cast to unsigned so the argument matches %X exactly; passing a
		// wchar_t to %X directly is undefined behavior per the C standard.
		// %04X zero-pads each value to 4 hex digits.
		printf("0x%04X ", (unsigned)pwc[i]);
		//printf("%0.2X ", pwc[i]);  // just prints the low order byte
		// Start a new line after every 8 values.
		if ((i + 1) % 8 == 0) { printf("\n"); }
	}
}// printwbytes(wchar_t * pwc, int nsz)

/// Swap the low and high bytes of each element in place, converting
/// between UTF-16LE and UTF-16BE (either direction).
/// NOTE(review): assumes each element carries a 16-bit code unit, as on
/// Windows where wchar_t is 16 bits; on platforms with 32-bit wchar_t,
/// any bits above the low 16 are discarded.
/// @param wcp  array whose elements are byte-swapped in place
/// @param nsz  number of elements to process
/// @return     nsz, the number of elements processed
int reverseEndian(wchar_t * wcp, int nsz)
{
	// The original allocated a scratch buffer with `new wchar_t[nsz]`
	// that was never used and never freed (memory leak) — the swap is
	// done entirely in place, so no buffer is needed.
	for (int i = 0; i < nsz; i++) {
		unsigned lb = wcp[i] & 0xFFu;          // low-order byte
		unsigned hb = (wcp[i] >> 8) & 0xFFu;   // high-order byte
		wcp[i] = (wchar_t)((lb << 8) | hb);    // bytes exchanged
	}
	return nsz;
}// reverseEndian(wchar_t * wcp, int nsz)

int _tmain(int argc, _TCHAR* argv[])

	wchar_t wcin[256], wcout[256];
	wcscpy_s(wcin, 256, _T("Nitche was an idiot"));
	//wcout << wcin << endl;  // not allowed ??
	wprintf(_T("%s\n"), wcin);
	printwbytes(wcin, wcslen(wcin));
	reverseEndian(wcin, wcslen(wcin));
	printwbytes(wcin, wcslen(wcin));
	wprintf(_T("%s\n"), wcin);
Nitche was an idiot

0x004E 0x0069 0x0074 0x0063 0x0068 0x0065 0x0020 0x0077
0x0061 0x0073 0x0020 0x0061 0x006E 0x0020 0x0069 0x0064
0x0069 0x006F 0x0074

0x4E00 0x6900 0x7400 0x6300 0x6800 0x6500 0x2000 0x7700
0x6100 0x7300 0x2000 0x6100 0x6E00 0x2000 0x6900 0x6400
0x6900 0x6F00 0x7400
What I find confusing is that the output has the form 0x[high byte][low byte], e.g., 0x004E, suggesting big-endianness when one considers the definitions:
UTF-16 (BE) - highest value byte at lowest address index
UTF-16 (LE) - lowest value byte at lowest address index
My only explanation is that 0x004E shows the numeric value of the wide character rather than the byte order at its address in memory — i.e., printf formats the value, it does not dump the bytes. But I cannot fathom why wprintf only accepts the byte order that it does.

I may just be having a brain fart, but this confusion has caused me considerable difficulty in dealing with encryption algorithms that need to deal extensively with wchar_t. Your thoughts greatly appreciated.