diff --git a/.gitignore b/.gitignore index e4cd55021..4dce217cd 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,6 @@ build # Special cog exceptions !Frameworks/OpenMPT/OpenMPT/build + +# User-specific xcconfig files +Xcode-config/DEVELOPMENT_TEAM.xcconfig diff --git a/Cog.entitlements b/Cog.entitlements index 6883e4a27..461123ca3 100644 --- a/Cog.entitlements +++ b/Cog.entitlements @@ -2,7 +2,7 @@ - com.apple.security.cs.allow-unsigned-executable-memory + com.apple.security.cs.allow-jit com.apple.security.cs.disable-library-validation diff --git a/Cog.xcodeproj/project.pbxproj b/Cog.xcodeproj/project.pbxproj index 6bcfe3077..4bcda0c7f 100644 --- a/Cog.xcodeproj/project.pbxproj +++ b/Cog.xcodeproj/project.pbxproj @@ -927,6 +927,8 @@ 29B97324FDCFA39411CA2CEA /* AppKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AppKit.framework; path = /System/Library/Frameworks/AppKit.framework; sourceTree = ""; }; 29B97325FDCFA39411CA2CEA /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = /System/Library/Frameworks/Foundation.framework; sourceTree = ""; }; 32CA4F630368D1EE00C91783 /* Cog_Prefix.pch */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Cog_Prefix.pch; sourceTree = ""; }; + 3DDFC2462344EC6B000F1453 /* DEVELOPMENT_TEAM.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = DEVELOPMENT_TEAM.xcconfig; sourceTree = ""; }; + 3DDFC2472344EC6B000F1453 /* Shared.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Shared.xcconfig; sourceTree = ""; }; 5604D4590D60349B004F5C5D /* SpotlightWindowController.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = SpotlightWindowController.m; path = Spotlight/SpotlightWindowController.m; sourceTree = ""; }; 5604D45A0D60349B004F5C5D /* SpotlightWindowController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = SpotlightWindowController.h; path = Spotlight/SpotlightWindowController.h; sourceTree = ""; }; 5604D4F40D60726E004F5C5D /* SpotlightPlaylistEntry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = SpotlightPlaylistEntry.h; path = Spotlight/SpotlightPlaylistEntry.h; sourceTree = ""; }; @@ -1513,6 +1515,7 @@ 29B97317FDCFA39411CA2CEA /* Resources */, 29B97323FDCFA39411CA2CEA /* Frameworks */, 17B619FF0B909ED400BC003F /* PlugIns */, + 3DDFC2452344EC6A000F1453 /* Xcode-config */, 19C28FACFE9D520D11CA2CBB /* Products */, ); name = Cog; @@ -1562,6 +1565,15 @@ name = Frameworks; sourceTree = ""; }; + 3DDFC2452344EC6A000F1453 /* Xcode-config */ = { + isa = PBXGroup; + children = ( + 3DDFC2462344EC6B000F1453 /* DEVELOPMENT_TEAM.xcconfig */, + 3DDFC2472344EC6B000F1453 /* Shared.xcconfig */, + ); + path = "Xcode-config"; + sourceTree = ""; + }; 566D32170D538550004466A5 /* Products */ = { isa = PBXGroup; children = ( @@ -1947,7 +1959,6 @@ LastUpgradeCheck = 1100; TargetAttributes = { 8D1107260486CEB800E47090 = { - DevelopmentTeam = N6E749HJ2X; ProvisioningStyle = Automatic; }; }; @@ -2826,7 +2837,6 @@ CODE_SIGN_STYLE = Automatic; COMBINE_HIDPI_IMAGES = YES; COPY_PHASE_STRIP = NO; - DEVELOPMENT_TEAM = ""; ENABLE_HARDENED_RUNTIME = YES; FRAMEWORK_SEARCH_PATHS = ( ThirdParty/Frameworks, @@ -2866,7 +2876,6 @@ CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; COMBINE_HIDPI_IMAGES = YES; - DEVELOPMENT_TEAM = ""; 
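/* ---- editor's aside, not part of the diff ----------------------------------
 * The entitlement swap above (allow-unsigned-executable-memory -> allow-jit)
 * pairs with MAP_JIT: under the hardened runtime, the allow-jit entitlement
 * permits RWX pages only when they are requested with MAP_JIT, which is what
 * the lazyusf2 recomp.c hunk further down adds. A minimal standalone sketch of
 * that allocation pattern, assuming a hypothetical helper name and with error
 * handling reduced to returning NULL: */
#include <stddef.h>
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON    /* older BSD/macOS spelling */
#endif
#ifndef MAP_JIT
#define MAP_JIT 0                 /* no-op on platforms without Apple's JIT flag */
#endif

static void *alloc_jit_block(size_t size) {
    void *block = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
    return (block == MAP_FAILED) ? NULL : block;
}
/* ---- end editor's aside --------------------------------------------------- */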
ENABLE_HARDENED_RUNTIME = YES; FRAMEWORK_SEARCH_PATHS = ( ThirdParty/Frameworks, @@ -2899,6 +2908,7 @@ }; C01FCF4F08A954540054247B /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = 3DDFC2472344EC6B000F1453 /* Shared.xcconfig */; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; @@ -2939,6 +2949,7 @@ }; C01FCF5008A954540054247B /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = 3DDFC2472344EC6B000F1453 /* Shared.xcconfig */; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; diff --git a/Frameworks/lazyusf2/lazyusf2/osal/preproc.h b/Frameworks/lazyusf2/lazyusf2/osal/preproc.h index c107506e1..f601a11b4 100644 --- a/Frameworks/lazyusf2/lazyusf2/osal/preproc.h +++ b/Frameworks/lazyusf2/lazyusf2/osal/preproc.h @@ -46,6 +46,11 @@ #define OSAL_BREAKPOINT_INTERRUPT __asm__(" int $3; "); #define ALIGN(BYTES,DATA) DATA __attribute__((aligned(BYTES))) #define osal_inline inline + #ifdef __i386__ + #define osal_fastcall __attribute__((regparm(1))) + #else + #define osal_fastcall + #endif /* string functions */ #define osal_insensitive_strcmp(x, y) strcasecmp(x, y) diff --git a/Frameworks/lazyusf2/lazyusf2/r4300/recomp.c b/Frameworks/lazyusf2/lazyusf2/r4300/recomp.c index b45709c94..6c26705e0 100644 --- a/Frameworks/lazyusf2/lazyusf2/r4300/recomp.c +++ b/Frameworks/lazyusf2/lazyusf2/r4300/recomp.c @@ -2532,8 +2532,12 @@ static void *malloc_exec(usf_state_t * state, size_t size) #define MAP_ANONYMOUS MAP_ANON #endif #endif + + #ifndef MAP_JIT + #define MAP_JIT 0 + #endif - void *block = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + void *block = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0); if (block == MAP_FAILED) { DebugMessage(state, M64MSG_ERROR, "Memory error: couldn't allocate %zi byte block of aligned RWX memory.", size); return NULL; } diff --git a/Frameworks/lazyusf2/lazyusf2/rsp_hle/alist.h b/Frameworks/lazyusf2/lazyusf2/rsp_hle/alist.h index f53db040a..99d765faf 100644 --- a/Frameworks/lazyusf2/lazyusf2/rsp_hle/alist.h +++ b/Frameworks/lazyusf2/lazyusf2/rsp_hle/alist.h @@ -22,7 +22,7 @@ #ifndef ALIST_INTERNAL_H #define ALIST_INTERNAL_H -#include +#include #include #include diff --git a/Frameworks/lazyusf2/lazyusf2/rsp_hle/plugin.c b/Frameworks/lazyusf2/lazyusf2/rsp_hle/plugin.c index 844d395ad..4ba31e4f6 100644 --- a/Frameworks/lazyusf2/lazyusf2/rsp_hle/plugin.c +++ b/Frameworks/lazyusf2/lazyusf2/rsp_hle/plugin.c @@ -25,9 +25,9 @@ #include #include -#include "../usf.h" -#include "../main.h" -#include "../usf_internal.h" +#include "../usf/usf.h" +#include "../main/main.h" +#include "../usf/usf_internal.h" #include "hle.h" @@ -69,7 +69,7 @@ void HleErrorMessage(void* user_defined, const char *message, ...) va_end( ap ); state->last_error = state->error_message; - StopEmulation( state ); + state->stop = 1; } void HleWarnMessage(void* user_defined, const char *message, ...) @@ -89,12 +89,12 @@ void HleWarnMessage(void* user_defined, const char *message, ...) 
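/* ---- editor's aside, not part of the diff ----------------------------------
 * The osal_fastcall macro introduced in preproc.h above only changes anything
 * on 32-bit x86, where regparm(1) passes the first argument in a register; on
 * every other target it expands to nothing, so callers stay portable.
 * Hypothetical usage sketch (fetch_opcode is not a lazyusf2 function): */
#ifdef __i386__
#define osal_fastcall __attribute__((regparm(1)))
#else
#define osal_fastcall
#endif

/* declaration and definition must carry the same attribute */
static int osal_fastcall fetch_opcode(const unsigned int *pc) {
    return (int)pc[0];
}
/* ---- end editor's aside --------------------------------------------------- */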
va_end( ap ); state->last_error = state->error_message; - StopEmulation( state ); + state->stop = 1; } void HleCheckInterrupts(void* user_defined) { - CheckInterrupts((usf_state_t*)user_defined); + // check_interupt((usf_state_t*)user_defined); } void HleProcessDlistList(void* user_defined) diff --git a/Frameworks/vgmstream/libvgmstream.xcodeproj/project.pbxproj b/Frameworks/vgmstream/libvgmstream.xcodeproj/project.pbxproj index 06a288d03..419a80798 100644 --- a/Frameworks/vgmstream/libvgmstream.xcodeproj/project.pbxproj +++ b/Frameworks/vgmstream/libvgmstream.xcodeproj/project.pbxproj @@ -548,6 +548,7 @@ 83C7282922BC8C1500678B4A /* mixing.c in Sources */ = {isa = PBXBuildFile; fileRef = 83C7282522BC8C1400678B4A /* mixing.c */; }; 83C7282A22BC8C1500678B4A /* plugins.c in Sources */ = {isa = PBXBuildFile; fileRef = 83C7282622BC8C1400678B4A /* plugins.c */; }; 83CD428A1F787879000F77BE /* libswresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 83CD42851F787878000F77BE /* libswresample.a */; }; + 83D2F58E2356B266007646ED /* libopus.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 83D2F58A2356B266007646ED /* libopus.a */; }; 83D731101A7394BF00CA1366 /* g7221.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83D730EB1A738EB300CA1366 /* g7221.framework */; }; 83D731111A7394D300CA1366 /* g7221.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83D730EB1A738EB300CA1366 /* g7221.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; }; 83D731891A749D1500CA1366 /* g719.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83D7313E1A74968A00CA1366 /* g719.framework */; }; @@ -1231,6 +1232,7 @@ 83C7282522BC8C1400678B4A /* mixing.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mixing.c; sourceTree = ""; }; 83C7282622BC8C1400678B4A /* plugins.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = plugins.c; sourceTree = ""; }; 83CD42851F787878000F77BE /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswresample.a; path = ../../ThirdParty/ffmpeg/lib/libswresample.a; sourceTree = ""; }; + 83D2F58A2356B266007646ED /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libopus.a; path = ../../ThirdParty/ffmpeg/lib/libopus.a; sourceTree = ""; }; 83D730E51A738EB200CA1366 /* g7221.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = g7221.xcodeproj; path = ../g7221/g7221.xcodeproj; sourceTree = ""; }; 83D731381A74968900CA1366 /* g719.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = g719.xcodeproj; path = ../g719/g719.xcodeproj; sourceTree = ""; }; 83D7318B1A749EEE00CA1366 /* g719_decoder.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = g719_decoder.c; sourceTree = ""; }; @@ -1264,6 +1266,7 @@ 838BDB711D3B1F990022CA6F /* CoreFoundation.framework in Frameworks */, 838BDB6A1D3AF7140022CA6F /* libiconv.tbd in Frameworks */, 838BDB681D3AF70D0022CA6F /* libz.tbd in Frameworks */, + 83D2F58E2356B266007646ED /* libopus.a in Frameworks */, 83CD428A1F787879000F77BE /* libswresample.a in Frameworks */, 83D731891A749D1500CA1366 /* g719.framework in Frameworks */, 83D731101A7394BF00CA1366 /* g7221.framework in Frameworks */, @@ -1324,6 +1327,7 @@ 836F6B3B18BDB8880095E648 /* Frameworks */ = { isa = PBXGroup; children = ( + 83D2F58A2356B266007646ED /* libopus.a */, 838BDB7E1D3B1FD10022CA6F /* 
Cocoa.framework */, 838BDB7C1D3B1FCC0022CA6F /* CoreVideo.framework */, 838BDB7A1D3B1FC20022CA6F /* CoreMedia.framework */, diff --git a/Frameworks/vgmstream/vgmstream/src/coding/adx_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/adx_decoder.c index f27b26d33..f906ad0f8 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/adx_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/adx_decoder.c @@ -1,155 +1,84 @@ #include "coding.h" #include "../util.h" -void decode_adx(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes) { - int i; - int32_t sample_count; - int32_t frame_samples = (frame_bytes - 2) * 2; - - int framesin = first_sample/frame_samples; - - int32_t scale = read_16bitBE(stream->offset+framesin*frame_bytes,stream->streamfile) + 1; +void decode_adx(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_size, coding_t coding_type) { + uint8_t frame[0x12] = {0}; + off_t frame_offset; + int i, frames_in, sample_count = 0; + size_t bytes_per_frame, samples_per_frame; + int scale, coef1, coef2; int32_t hist1 = stream->adpcm_history1_32; int32_t hist2 = stream->adpcm_history2_32; - int coef1 = stream->adpcm_coef[0]; - int coef2 = stream->adpcm_coef[1]; - first_sample = first_sample%frame_samples; - for (i=first_sample,sample_count=0; ioffset+framesin*frame_bytes +2+i/2,stream->streamfile); + /* external interleave (fixed size), mono */ + bytes_per_frame = frame_size; + samples_per_frame = (bytes_per_frame - 0x02) * 2; /* always 32 */ + frames_in = first_sample / samples_per_frame; + first_sample = first_sample % samples_per_frame; - outbuf[sample_count] = clamp16( - (i&1? - get_low_nibble_signed(sample_byte): - get_high_nibble_signed(sample_byte) - ) * scale + - (coef1 * hist1 >> 12) + (coef2 * hist2 >> 12) - ); + /* parse frame header */ + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ - hist2 = hist1; - hist1 = outbuf[sample_count]; + scale = get_16bitBE(frame+0x00); + switch(coding_type) { + case coding_CRI_ADX: + scale = scale + 1; + coef1 = stream->adpcm_coef[0]; + coef2 = stream->adpcm_coef[1]; + break; + case coding_CRI_ADX_exp: + scale = 1 << (12 - scale); + coef1 = stream->adpcm_coef[0]; + coef2 = stream->adpcm_coef[1]; + break; + case coding_CRI_ADX_fixed: + scale = (scale & 0x1fff) + 1; + coef1 = stream->adpcm_coef[(frame[0] >> 5)*2 + 0]; + coef2 = stream->adpcm_coef[(frame[0] >> 5)*2 + 1]; + break; + case coding_CRI_ADX_enc_8: + case coding_CRI_ADX_enc_9: + scale = ((scale ^ stream->adx_xor) & 0x1fff) + 1; + coef1 = stream->adpcm_coef[0]; + coef2 = stream->adpcm_coef[1]; + break; + default: + scale = scale + 1; + coef1 = stream->adpcm_coef[0]; + coef2 = stream->adpcm_coef[1]; + break; } - stream->adpcm_history1_32 = hist1; - stream->adpcm_history2_32 = hist2; -} + /* decode nibbles */ + for (i = first_sample; i < first_sample + samples_to_do; i++) { + int32_t sample = 0; + uint8_t nibbles = frame[0x02 + i/2]; -void decode_adx_exp(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes) { - int i; - int32_t sample_count; - int32_t frame_samples = (frame_bytes - 2) * 2; + sample = i&1 ? 
/* high nibble first */ + get_low_nibble_signed(nibbles): + get_high_nibble_signed(nibbles); + sample = sample * scale + (coef1 * hist1 >> 12) + (coef2 * hist2 >> 12); + sample = clamp16(sample); - int framesin = first_sample/frame_samples; - - int32_t scale = read_16bitBE(stream->offset+framesin*frame_bytes,stream->streamfile); - int32_t hist1, hist2; - int coef1, coef2; - scale = 1 << (12 - scale); - hist1 = stream->adpcm_history1_32; - hist2 = stream->adpcm_history2_32; - coef1 = stream->adpcm_coef[0]; - coef2 = stream->adpcm_coef[1]; - - first_sample = first_sample%frame_samples; - - for (i=first_sample,sample_count=0; ioffset+framesin*frame_bytes +2+i/2,stream->streamfile); - - outbuf[sample_count] = clamp16( - (i&1? - get_low_nibble_signed(sample_byte): - get_high_nibble_signed(sample_byte) - ) * scale + - (coef1 * hist1 >> 12) + (coef2 * hist2 >> 12) - ); + outbuf[sample_count] = sample; + sample_count += channelspacing; hist2 = hist1; - hist1 = outbuf[sample_count]; - } - - stream->adpcm_history1_32 = hist1; - stream->adpcm_history2_32 = hist2; -} - -void decode_adx_fixed(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes) { - int i; - int32_t sample_count; - int32_t frame_samples = (frame_bytes - 2) * 2; - - int framesin = first_sample/frame_samples; - - int32_t scale = (read_16bitBE(stream->offset + framesin*frame_bytes, stream->streamfile) & 0x1FFF) + 1; - int32_t predictor = read_8bit(stream->offset + framesin*frame_bytes, stream->streamfile) >> 5; - int32_t hist1 = stream->adpcm_history1_32; - int32_t hist2 = stream->adpcm_history2_32; - int coef1 = stream->adpcm_coef[predictor * 2]; - int coef2 = stream->adpcm_coef[predictor * 2 + 1]; - - first_sample = first_sample%frame_samples; - - for (i=first_sample,sample_count=0; ioffset+framesin*frame_bytes +2+i/2,stream->streamfile); - - outbuf[sample_count] = clamp16( - (i&1? - get_low_nibble_signed(sample_byte): - get_high_nibble_signed(sample_byte) - ) * scale + - (coef1 * hist1 >> 12) + (coef2 * hist2 >> 12) - ); - - hist2 = hist1; - hist1 = outbuf[sample_count]; - } - - stream->adpcm_history1_32 = hist1; - stream->adpcm_history2_32 = hist2; -} - -void adx_next_key(VGMSTREAMCHANNEL * stream) -{ - stream->adx_xor = ( stream->adx_xor * stream->adx_mult + stream->adx_add ) & 0x7fff; -} - -void decode_adx_enc(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes) { - int i; - int32_t sample_count; - int32_t frame_samples = (frame_bytes - 2) * 2; - - int framesin = first_sample/frame_samples; - - int32_t scale = ((read_16bitBE(stream->offset+framesin*frame_bytes,stream->streamfile) ^ stream->adx_xor)&0x1fff) + 1; - int32_t hist1 = stream->adpcm_history1_32; - int32_t hist2 = stream->adpcm_history2_32; - int coef1 = stream->adpcm_coef[0]; - int coef2 = stream->adpcm_coef[1]; - - first_sample = first_sample%frame_samples; - - for (i=first_sample,sample_count=0; ioffset+framesin*frame_bytes +2+i/2,stream->streamfile); - - outbuf[sample_count] = clamp16( - (i&1? 
- get_low_nibble_signed(sample_byte): - get_high_nibble_signed(sample_byte) - ) * scale + - (coef1 * hist1 >> 12) + (coef2 * hist2 >> 12) - ); - - hist2 = hist1; - hist1 = outbuf[sample_count]; + hist1 = sample; } stream->adpcm_history1_32 = hist1; stream->adpcm_history2_32 = hist2; - if (!(i % 32)) { - for (i=0;iadx_channels;i++) - { + if ((coding_type == coding_CRI_ADX_enc_8 || coding_type == coding_CRI_ADX_enc_9) && !(i % 32)) { + for (i =0; i < stream->adx_channels; i++) { adx_next_key(stream); } } - +} + +void adx_next_key(VGMSTREAMCHANNEL * stream) { + stream->adx_xor = (stream->adx_xor * stream->adx_mult + stream->adx_add) & 0x7fff; } diff --git a/Frameworks/vgmstream/vgmstream/src/coding/atrac9_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/atrac9_decoder.c index 3a70c5c64..250504930 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/atrac9_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/atrac9_decoder.c @@ -54,6 +54,7 @@ atrac9_codec_data *init_atrac9(atrac9_config *cfg) { data->data_buffer_size = data->info.superframeSize; /* extra leeway as Atrac9Decode seems to overread ~2 bytes (doesn't affect decoding though) */ data->data_buffer = calloc(sizeof(uint8_t), data->data_buffer_size + 0x10); + /* while ATRAC9 uses float internally, Sony's API only return PCM16 */ data->sample_buffer = calloc(sizeof(sample_t), data->info.channels * data->info.frameSamples * data->info.framesInSuperframe); data->samples_to_discard = cfg->encoder_delay; diff --git a/Frameworks/vgmstream/vgmstream/src/coding/coding.h b/Frameworks/vgmstream/vgmstream/src/coding/coding.h index 9b1f378da..c56a28c2d 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/coding.h +++ b/Frameworks/vgmstream/vgmstream/src/coding/coding.h @@ -4,10 +4,7 @@ #include "../vgmstream.h" /* adx_decoder */ -void decode_adx(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes); -void decode_adx_exp(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes); -void decode_adx_fixed(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes); -void decode_adx_enc(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes); +void decode_adx(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int32_t frame_bytes, coding_t coding_type); void adx_next_key(VGMSTREAMCHANNEL * stream); /* g721_decoder */ @@ -92,10 +89,10 @@ size_t ps_cfg_bytes_to_samples(size_t bytes, size_t frame_size, int channels); int ps_check_format(STREAMFILE *streamFile, off_t offset, size_t max); /* psv_decoder */ -void decode_hevag(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do); +void decode_hevag(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do); /* xa_decoder */ -void decode_xa(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel); +void decode_xa(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel); size_t xa_bytes_to_samples(size_t bytes, int channels, int is_blocked); /* ea_xa_decoder */ @@ -308,6 +305,8 @@ void free_ffmpeg(ffmpeg_codec_data 
*data); void ffmpeg_set_skip_samples(ffmpeg_codec_data * data, int skip_samples); uint32_t ffmpeg_get_channel_layout(ffmpeg_codec_data * data); void ffmpeg_set_channel_remapping(ffmpeg_codec_data * data, int *channels_remap); +const char* ffmpeg_get_codec_name(ffmpeg_codec_data * data); +void ffmpeg_set_force_seek(ffmpeg_codec_data * data); /* ffmpeg_decoder_utils.c (helper-things) */ diff --git a/Frameworks/vgmstream/vgmstream/src/coding/coding_utils.c b/Frameworks/vgmstream/vgmstream/src/coding/coding_utils.c index 422a74188..fbb6db35e 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/coding_utils.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/coding_utils.c @@ -1159,26 +1159,26 @@ int w_bits(vgm_bitstream * ob, int num_bits, uint32_t value) { /* CUSTOM STREAMFILES */ /* ******************************************** */ -STREAMFILE* setup_subfile_streamfile(STREAMFILE *streamFile, off_t subfile_offset, size_t subfile_size, const char* extension) { - STREAMFILE *temp_streamFile = NULL, *new_streamFile = NULL; +STREAMFILE* setup_subfile_streamfile(STREAMFILE *sf, off_t subfile_offset, size_t subfile_size, const char* extension) { + STREAMFILE *temp_sf = NULL, *new_sf = NULL; - new_streamFile = open_wrap_streamfile(streamFile); - if (!new_streamFile) goto fail; - temp_streamFile = new_streamFile; + new_sf = open_wrap_streamfile(sf); + if (!new_sf) goto fail; + temp_sf = new_sf; - new_streamFile = open_clamp_streamfile(temp_streamFile, subfile_offset,subfile_size); - if (!new_streamFile) goto fail; - temp_streamFile = new_streamFile; + new_sf = open_clamp_streamfile(temp_sf, subfile_offset, subfile_size); + if (!new_sf) goto fail; + temp_sf = new_sf; if (extension) { - new_streamFile = open_fakename_streamfile(temp_streamFile, NULL,extension); - if (!new_streamFile) goto fail; - temp_streamFile = new_streamFile; + new_sf = open_fakename_streamfile(temp_sf, NULL, extension); + if (!new_sf) goto fail; + temp_sf = new_sf; } - return temp_streamFile; + return temp_sf; fail: - close_streamfile(temp_streamFile); + close_streamfile(temp_sf); return NULL; } diff --git a/Frameworks/vgmstream/vgmstream/src/coding/ea_xas_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/ea_xas_decoder.c index e28c6c8c2..5fde33b54 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/ea_xas_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/ea_xas_decoder.c @@ -1,6 +1,12 @@ #include "coding.h" #include "../util.h" +#if 0 +/* known game code/platforms use float buffer and coefs, but some approximations around use this int math: + * ... + * coef1 = table[index + 0] + * coef2 = table[index + 4] + * sample = clamp16(((signed_nibble << (20 - shift)) + hist1 * coef1 + hist2 * coef2 + 128) >> 8); */ static const int EA_XA_TABLE[20] = { 0, 240, 460, 392, 0, 0, -208, -220, @@ -8,33 +14,58 @@ static const int EA_XA_TABLE[20] = { 7, 8, 10, 11, 0, -1, -3, -4 }; +#endif -/* EA-XAS v1, evolution of EA-XA/XAS and cousin of MTA2. From FFmpeg (general info) + MTA2 (layout) + EA-XA (decoding) +/* standard CD-XA's K0/K1 filter pairs */ +static const float xa_coefs[16][2] = { + { 0.0, 0.0 }, + { 0.9375, 0.0 }, + { 1.796875, -0.8125 }, + { 1.53125, -0.859375 }, + /* only 4 pairs exist, assume 0s for bad indexes */ +}; + +/* EA-XAS v1, evolution of EA-XA/XAS and cousin of MTA2. Reverse engineered from various .exes/.so * - * Layout: blocks of 0x4c per channel (128 samples), divided into 4 headers + 4 vertical groups of 15 bytes (for parallelism?). 
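/* ---- editor's aside, not part of the diff ----------------------------------
 * The xa_coefs table above holds the standard CD-XA K0/K1 predictor pairs as
 * floats. The rewritten decode_adx earlier in the diff and the EA-XAS loops
 * below share the same shape: expand a signed 4-bit nibble, apply a per-frame
 * scale, then add a 2-tap prediction from the previous two samples. A
 * simplified standalone sketch of the EA-XAS-style (float coefficient)
 * variant; not the vgmstream API, all names here are made up: */
#include <stdint.h>

static const float demo_coefs[4][2] = {
    { 0.0f, 0.0f }, { 0.9375f, 0.0f }, { 1.796875f, -0.8125f }, { 1.53125f, -0.859375f },
};

static int16_t demo_clamp16(int32_t v) {
    return (int16_t)(v > 32767 ? 32767 : (v < -32768 ? -32768 : v));
}

/* nibble: 0..15; shift and filter index come from the frame/group header */
static int16_t demo_decode_nibble(uint8_t nibble, int shift, int filter,
                                  int16_t *hist1, int16_t *hist2) {
    int32_t sample = (int16_t)(nibble << 12) >> shift;        /* sign extend + scale */
    sample += (int32_t)(*hist1 * demo_coefs[filter][0]
                      + *hist2 * demo_coefs[filter][1]);      /* 2-tap prediction */
    int16_t out = demo_clamp16(sample);
    *hist2 = *hist1;
    *hist1 = out;
    return out;
}
/* ---- end editor's aside --------------------------------------------------- */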
+ * Layout: blocks of 0x4c per channel (128 samples), divided into 4 headers + 4 vertical groups of 15 bytes. + * Original code reads all headers first then processes all nibbles (for CPU cache/parallelism/SIMD optimizations). * To simplify, always decodes the block and discards unneeded samples, so doesn't use external hist. */ -void decode_ea_xas_v1(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel) { - int group, row, i; - int samples_done = 0, sample_count = 0; +void decode_ea_xas_v1(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel) { + uint8_t frame[0x4c] = {0}; + off_t frame_offset; + int group, row, i, samples_done = 0, sample_count = 0; + size_t bytes_per_frame, samples_per_frame; /* internal interleave */ - int block_samples = 128; - first_sample = first_sample % block_samples; + bytes_per_frame = 0x4c; + samples_per_frame = 128; + first_sample = first_sample % samples_per_frame; + + frame_offset = stream->offset + bytes_per_frame * channel; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ + + //todo: original code uses float sample buffer: + //- header pcm-hist to float-hist: hist * (1/32768) + //- nibble to signed to float: (int32_t)(pnibble << 28) * SHIFT_MUL_LUT[shift_index] + // look-up table just simplifies ((nibble << 12 << 12) >> 12 + shift) * (1/32768) + // though maybe introduces rounding errors? + //- coefs apply normally, though hists are already floats + //- final float sample isn't clamped - /* process groups */ + /* parse group headers */ for (group = 0; group < 4; group++) { - int coef1, coef2; + float coef1, coef2; int16_t hist1, hist2; uint8_t shift; - uint32_t group_header = (uint32_t)read_32bitLE(stream->offset + channel*0x4c + group*0x4, stream->streamfile); /* always LE */ + uint32_t group_header = (uint32_t)get_32bitLE(frame + group*0x4); /* always LE */ - coef1 = EA_XA_TABLE[(uint8_t)(group_header & 0x0F) + 0]; - coef2 = EA_XA_TABLE[(uint8_t)(group_header & 0x0F) + 4]; - hist2 = (int16_t)(group_header & 0xFFF0); + coef1 = xa_coefs[group_header & 0x0F][0]; + coef2 = xa_coefs[group_header & 0x0F][1]; + hist2 = (int16_t)((group_header >> 0) & 0xFFF0); hist1 = (int16_t)((group_header >> 16) & 0xFFF0); - shift = 20 - ((group_header >> 16) & 0x0F); + shift = (group_header >> 16) & 0x0F; /* write header samples (needed) */ if (sample_count >= first_sample && samples_done < samples_to_do) { @@ -51,12 +82,14 @@ void decode_ea_xas_v1(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspa /* process nibbles per group */ for (row = 0; row < 15; row++) { for (i = 0; i < 1*2; i++) { - uint8_t sample_byte = (uint8_t)read_8bit(stream->offset + channel*0x4c + 4*4 + row*0x04 + group + i/2, stream->streamfile); + uint8_t nibbles = frame[4*4 + row*0x04 + group + i/2]; int sample; - sample = get_nibble_signed(sample_byte, !(i&1)); /* upper first */ - sample = sample << shift; - sample = (sample + hist1 * coef1 + hist2 * coef2 + 128) >> 8; + sample = i&1 ? 
/* high nibble first */ + (nibbles >> 0) & 0x0f : + (nibbles >> 4) & 0x0f; + sample = (int16_t)(sample << 12) >> shift; /* 16b sign extend + scale */ + sample = sample + hist1 * coef1 + hist2 * coef2; sample = clamp16(sample); if (sample_count >= first_sample && samples_done < samples_to_do) { @@ -73,37 +106,43 @@ void decode_ea_xas_v1(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspa /* internal interleave (interleaved channels, but manually advances to co-exist with ea blocks) */ - if (first_sample + samples_done == block_samples) { - stream->offset += 0x4c * channelspacing; + if (first_sample + samples_done == samples_per_frame) { + stream->offset += bytes_per_frame * channelspacing; } } /* EA-XAS v0, without complex layouts and closer to EA-XA. Somewhat based on daemon1's decoder */ void decode_ea_xas_v0(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel) { + uint8_t frame[0x13] = {0}; off_t frame_offset; - int i; - int block_samples, frames_in, samples_done = 0, sample_count = 0; + int i, frames_in, samples_done = 0, sample_count = 0; + size_t bytes_per_frame, samples_per_frame; + /* external interleave (fixed size), mono */ - block_samples = 32; - frames_in = first_sample / block_samples; - first_sample = first_sample % block_samples; + bytes_per_frame = 0x02 + 0x02 + 0x0f; + samples_per_frame = 1 + 1 + 0x0f*2; + frames_in = first_sample / samples_per_frame; + first_sample = first_sample % samples_per_frame; - frame_offset = stream->offset + (0x0f+0x02+0x02)*frames_in; + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ - /* process frames */ + //todo see above + + /* process frame */ { - int coef1, coef2; + float coef1, coef2; int16_t hist1, hist2; uint8_t shift; - uint32_t frame_header = (uint32_t)read_32bitLE(frame_offset, stream->streamfile); /* always LE */ + uint32_t frame_header = (uint32_t)get_32bitLE(frame); /* always LE */ - coef1 = EA_XA_TABLE[(uint8_t)(frame_header & 0x0F) + 0]; - coef2 = EA_XA_TABLE[(uint8_t)(frame_header & 0x0F) + 4]; - hist2 = (int16_t)(frame_header & 0xFFF0); + coef1 = xa_coefs[frame_header & 0x0F][0]; + coef2 = xa_coefs[frame_header & 0x0F][1]; + hist2 = (int16_t)((frame_header >> 0) & 0xFFF0); hist1 = (int16_t)((frame_header >> 16) & 0xFFF0); - shift = 20 - ((frame_header >> 16) & 0x0F); + shift = (frame_header >> 16) & 0x0F; /* write header samples (needed) */ if (sample_count >= first_sample && samples_done < samples_to_do) { @@ -119,12 +158,14 @@ void decode_ea_xas_v0(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspa /* process nibbles */ for (i = 0; i < 0x0f*2; i++) { - uint8_t sample_byte = (uint8_t)read_8bit(frame_offset + 0x02 + 0x02 + i/2, stream->streamfile); + uint8_t nibbles = frame[0x02 + 0x02 + i/2]; int sample; - sample = get_nibble_signed(sample_byte, !(i&1)); /* upper first */ - sample = sample << shift; - sample = (sample + hist1 * coef1 + hist2 * coef2 + 128) >> 8; + sample = i&1 ? 
/* high nibble first */ + (nibbles >> 0) & 0x0f : + (nibbles >> 4) & 0x0f; + sample = (int16_t)(sample << 12) >> shift; /* 16b sign extend + scale */ + sample = sample + hist1 * coef1 + hist2 * coef2; sample = clamp16(sample); if (sample_count >= first_sample && samples_done < samples_to_do) { diff --git a/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder.c index 569b8c616..465ff7305 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder.c @@ -2,8 +2,6 @@ #ifdef VGM_USE_FFMPEG -/* internal sizes, can be any value */ -#define FFMPEG_DEFAULT_SAMPLE_BUFFER_SIZE 2048 #define FFMPEG_DEFAULT_IO_BUFFER_SIZE 128 * 1024 @@ -28,12 +26,14 @@ static void g_init_ffmpeg() { g_ffmpeg_initialized = 1; av_log_set_flags(AV_LOG_SKIP_REPEATED); av_log_set_level(AV_LOG_ERROR); - //av_register_all(); /* not needed in newer versions */ +//#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 18, 100) +// av_register_all(); /* not needed in newer versions */ +//#endif g_ffmpeg_initialized = 2; } } -static void remap_audio(sample_t *outbuf, int sample_count, int channels, int channel_mappings[]) { +static void remap_audio(sample_t *outbuf, int sample_count, int channels, int *channel_mappings) { int ch_from,ch_to,s; sample_t temp; for (s = 0; s < sample_count; s++) { @@ -52,68 +52,6 @@ static void remap_audio(sample_t *outbuf, int sample_count, int channels, int ch } } -static void invert_audio(sample_t *outbuf, int sample_count, int channels) { - int i; - - for (i = 0; i < sample_count*channels; i++) { - outbuf[i] = -outbuf[i]; - } -} - -/* converts codec's samples (can be in any format, ex. Ogg's float32) to PCM16 */ -static void convert_audio_pcm16(sample_t *outbuf, const uint8_t *inbuf, int fullSampleCount, int bitsPerSample, int floatingPoint) { - int s; - switch (bitsPerSample) { - case 8: { - for (s = 0; s < fullSampleCount; s++) { - *outbuf++ = ((int)(*(inbuf++))-0x80) << 8; - } - break; - } - case 16: { - int16_t *s16 = (int16_t *)inbuf; - for (s = 0; s < fullSampleCount; s++) { - *outbuf++ = *(s16++); - } - break; - } - case 32: { - if (!floatingPoint) { - int32_t *s32 = (int32_t *)inbuf; - for (s = 0; s < fullSampleCount; s++) { - *outbuf++ = (*(s32++)) >> 16; - } - } - else { - float *s32 = (float *)inbuf; - for (s = 0; s < fullSampleCount; s++) { - float sample = *s32++; - int s16 = (int)(sample * 32768.0f); - if ((unsigned)(s16 + 0x8000) & 0xFFFF0000) { - s16 = (s16 >> 31) ^ 0x7FFF; - } - *outbuf++ = s16; - } - } - break; - } - case 64: { - if (floatingPoint) { - double *s64 = (double *)inbuf; - for (s = 0; s < fullSampleCount; s++) { - double sample = *s64++; - int s16 = (int)(sample * 32768.0f); - if ((unsigned)(s16 + 0x8000) & 0xFFFF0000) { - s16 = (s16 >> 31) ^ 0x7FFF; - } - *outbuf++ = s16; - } - } - break; - } - } -} - /** * Special patching for FFmpeg's buggy seek code. 
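/* ---- editor's aside, not part of the diff ----------------------------------
 * The float path of the removed convert_audio_pcm16 above, and the "alt float
 * clamping" notes further down, rely on a single overflow test: after biasing
 * a candidate int16 by 0x8000, any out-of-range value sets bits above the low
 * 16, and (s >> 31) ^ 0x7FFF then yields 32767 for positive overflow and
 * -32768 for negative overflow. A self-contained check of that behaviour
 * (helper name is made up): */
#include <assert.h>
#include <stdint.h>

static int16_t demo_clamp_s16(int32_t s) {
    if ((uint32_t)(s + 0x8000) & 0xFFFF0000)
        s = (s >> 31) ^ 0x7FFF;   /* sign bit selects 0x7FFF or ~0x7FFF (= -0x8000) */
    return (int16_t)s;
}

int main(void) {
    assert(demo_clamp_s16( 40000) ==  32767);
    assert(demo_clamp_s16(-40000) == -32768);
    assert(demo_clamp_s16(  1234) ==   1234);
    return 0;
}
/* ---- end editor's aside --------------------------------------------------- */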
* @@ -134,7 +72,7 @@ static int init_seek(ffmpeg_codec_data * data) { int distance = 0; /* always 0 ("duration") */ AVStream * stream = data->formatCtx->streams[data->streamIndex]; - AVPacket * pkt = data->lastReadPacket; + AVPacket * pkt = data->packet; /* read_seek shouldn't need this index, but direct access to FFmpeg's internals is no good */ @@ -239,7 +177,7 @@ static int ffmpeg_read(void *opaque, uint8_t *buf, int read_size) { if (max_to_copy > read_size) max_to_copy = read_size; - memcpy(buf, data->header_insert_block + data->logical_offset, max_to_copy); + memcpy(buf, data->header_block + data->logical_offset, max_to_copy); buf += max_to_copy; read_size -= max_to_copy; data->logical_offset += max_to_copy; @@ -323,13 +261,9 @@ ffmpeg_codec_data * init_ffmpeg_header_offset(STREAMFILE *streamFile, uint8_t * * Stream index can be passed if the file has multiple audio streams that FFmpeg can demux (1=first). */ ffmpeg_codec_data * init_ffmpeg_header_offset_subsong(STREAMFILE *streamFile, uint8_t * header, uint64_t header_size, uint64_t start, uint64_t size, int target_subsong) { - char filename[PATH_LIMIT]; ffmpeg_codec_data * data = NULL; int errcode; - AVStream *stream; - AVRational tb; - /* check values */ if ((header && !header_size) || (!header && header_size)) @@ -341,7 +275,7 @@ ffmpeg_codec_data * init_ffmpeg_header_offset_subsong(STREAMFILE *streamFile, ui } - /* ffmpeg global setup */ + /* initial FFmpeg setup */ g_init_ffmpeg(); @@ -349,15 +283,14 @@ ffmpeg_codec_data * init_ffmpeg_header_offset_subsong(STREAMFILE *streamFile, ui data = calloc(1, sizeof(ffmpeg_codec_data)); if (!data) return NULL; - streamFile->get_name( streamFile, filename, sizeof(filename) ); - data->streamfile = streamFile->open(streamFile, filename, STREAMFILE_DEFAULT_BUFFER_SIZE); + data->streamfile = reopen_streamfile(streamFile, 0); if (!data->streamfile) goto fail; /* fake header to trick FFmpeg into demuxing/decoding the stream */ if (header_size > 0) { data->header_size = header_size; - data->header_insert_block = av_memdup(header, header_size); - if (!data->header_insert_block) goto fail; + data->header_block = av_memdup(header, header_size); + if (!data->header_block) goto fail; } data->start = start; @@ -371,103 +304,59 @@ ffmpeg_codec_data * init_ffmpeg_header_offset_subsong(STREAMFILE *streamFile, ui errcode = init_ffmpeg_config(data, target_subsong, 0); if (errcode < 0) goto fail; - stream = data->formatCtx->streams[data->streamIndex]; + /* reset non-zero values */ + data->read_packet = 1; + /* setup other values */ + { + AVStream *stream = data->formatCtx->streams[data->streamIndex]; + AVRational tb = {0}; - /* derive info */ - data->sampleRate = data->codecCtx->sample_rate; - data->channels = data->codecCtx->channels; - data->bitrate = (int)(data->codecCtx->bit_rate); - data->floatingPoint = 0; - switch (data->codecCtx->sample_fmt) { - case AV_SAMPLE_FMT_U8: - case AV_SAMPLE_FMT_U8P: - data->bitsPerSample = 8; - break; + /* derive info */ + data->sampleRate = data->codecCtx->sample_rate; + data->channels = data->codecCtx->channels; + data->bitrate = (int)(data->codecCtx->bit_rate); +#if 0 + data->blockAlign = data->codecCtx->block_align; + data->frameSize = data->codecCtx->frame_size; + if(data->frameSize == 0) /* some formats don't set frame_size but can get on request, and vice versa */ + data->frameSize = av_get_audio_frame_duration(data->codecCtx,0); +#endif - case AV_SAMPLE_FMT_S16: - case AV_SAMPLE_FMT_S16P: - data->bitsPerSample = 16; - break; + /* try to guess frames/samples 
(duration isn't always set) */ + tb.num = 1; tb.den = data->codecCtx->sample_rate; + data->totalSamples = av_rescale_q(stream->duration, stream->time_base, tb); + if (data->totalSamples < 0) + data->totalSamples = 0; /* caller must consider this */ - case AV_SAMPLE_FMT_S32: - case AV_SAMPLE_FMT_S32P: - data->bitsPerSample = 32; - break; + /* expose start samples to be skipped (encoder delay, usually added by MDCT-based encoders like AAC/MP3/ATRAC3/XMA/etc) + * get after init_seek because some demuxers like AAC only fill skip_samples for the first packet */ + if (stream->start_skip_samples) /* samples to skip in the first packet */ + data->skipSamples = stream->start_skip_samples; + else if (stream->skip_samples) /* samples to skip in any packet (first in this case), used sometimes instead (ex. AAC) */ + data->skipSamples = stream->skip_samples; - case AV_SAMPLE_FMT_FLT: - case AV_SAMPLE_FMT_FLTP: - data->bitsPerSample = 32; - data->floatingPoint = 1; - break; - - case AV_SAMPLE_FMT_DBL: - case AV_SAMPLE_FMT_DBLP: - data->bitsPerSample = 64; - data->floatingPoint = 1; - break; - - default: - goto fail; + /* check ways to skip encoder delay/padding, for debugging purposes (some may be old/unused/encoder only/etc) */ + VGM_ASSERT(data->codecCtx->delay > 0, "FFMPEG: delay %i\n", (int)data->codecCtx->delay);//delay: OPUS + //VGM_ASSERT(data->codecCtx->internal->skip_samples > 0, ...); /* for codec use, not accessible */ + VGM_ASSERT(stream->codecpar->initial_padding > 0, "FFMPEG: initial_padding %i\n", (int)stream->codecpar->initial_padding);//delay: OPUS + VGM_ASSERT(stream->codecpar->trailing_padding > 0, "FFMPEG: trailing_padding %i\n", (int)stream->codecpar->trailing_padding); + VGM_ASSERT(stream->codecpar->seek_preroll > 0, "FFMPEG: seek_preroll %i\n", (int)stream->codecpar->seek_preroll);//seek delay: OPUS + VGM_ASSERT(stream->skip_samples > 0, "FFMPEG: skip_samples %i\n", (int)stream->skip_samples); //delay: MP4 + VGM_ASSERT(stream->start_skip_samples > 0, "FFMPEG: start_skip_samples %i\n", (int)stream->start_skip_samples); //delay: MP3 + VGM_ASSERT(stream->first_discard_sample > 0, "FFMPEG: first_discard_sample %i\n", (int)stream->first_discard_sample); //padding: MP3 + VGM_ASSERT(stream->last_discard_sample > 0, "FFMPEG: last_discard_sample %i\n", (int)stream->last_discard_sample); //padding: MP3 + /* also negative timestamp for formats like OGG/OPUS */ + /* not using it: BINK, FLAC, ATRAC3, XMA, MPC, WMA (may use internal skip samples) */ + //todo: double check Opus behavior } - /* setup decode buffer */ - data->sampleBufferBlock = FFMPEG_DEFAULT_SAMPLE_BUFFER_SIZE; - data->sampleBuffer = av_malloc(data->sampleBufferBlock * (data->bitsPerSample / 8) * data->channels); - if (!data->sampleBuffer) goto fail; - - - /* try to guess frames/samples (duration isn't always set) */ - tb.num = 1; tb.den = data->codecCtx->sample_rate; - data->totalSamples = av_rescale_q(stream->duration, stream->time_base, tb); - if (data->totalSamples < 0) - data->totalSamples = 0; /* caller must consider this */ - - data->blockAlign = data->codecCtx->block_align; - data->frameSize = data->codecCtx->frame_size; - if(data->frameSize == 0) /* some formats don't set frame_size but can get on request, and vice versa */ - data->frameSize = av_get_audio_frame_duration(data->codecCtx,0); - - - /* reset */ - data->readNextPacket = 1; - data->bytesConsumedFromDecodedFrame = INT_MAX; - data->endOfStream = 0; - data->endOfAudio = 0; - - - /* expose start samples to be skipped (encoder delay, usually added by MDCT-based 
encoders like AAC/MP3/ATRAC3/XMA/etc) - * get after init_seek because some demuxers like AAC only fill skip_samples for the first packet */ - if (stream->start_skip_samples) /* samples to skip in the first packet */ - data->skipSamples = stream->start_skip_samples; - else if (stream->skip_samples) /* samples to skip in any packet (first in this case), used sometimes instead (ex. AAC) */ - data->skipSamples = stream->skip_samples; - - - /* check ways to skip encoder delay/padding, for debugging purposes (some may be old/unused/encoder only/etc) */ - VGM_ASSERT(data->codecCtx->delay > 0, "FFMPEG: delay %i\n", (int)data->codecCtx->delay);//delay: OPUS - //VGM_ASSERT(data->codecCtx->internal->skip_samples > 0, ...); /* for codec use, not accessible */ - VGM_ASSERT(stream->codecpar->initial_padding > 0, "FFMPEG: initial_padding %i\n", (int)stream->codecpar->initial_padding);//delay: OPUS - VGM_ASSERT(stream->codecpar->trailing_padding > 0, "FFMPEG: trailing_padding %i\n", (int)stream->codecpar->trailing_padding); - VGM_ASSERT(stream->codecpar->seek_preroll > 0, "FFMPEG: seek_preroll %i\n", (int)stream->codecpar->seek_preroll);//seek delay: OPUS - VGM_ASSERT(stream->skip_samples > 0, "FFMPEG: skip_samples %i\n", (int)stream->skip_samples); //delay: MP4 - VGM_ASSERT(stream->start_skip_samples > 0, "FFMPEG: start_skip_samples %i\n", (int)stream->start_skip_samples); //delay: MP3 - VGM_ASSERT(stream->first_discard_sample > 0, "FFMPEG: first_discard_sample %i\n", (int)stream->first_discard_sample); //padding: MP3 - VGM_ASSERT(stream->last_discard_sample > 0, "FFMPEG: last_discard_sample %i\n", (int)stream->last_discard_sample); //padding: MP3 - /* also negative timestamp for formats like OGG/OPUS */ - /* not using it: BINK, FLAC, ATRAC3, XMA, MPC, WMA (may use internal skip samples) */ - //todo: double check Opus behavior - /* setup decent seeking for faulty formats */ errcode = init_seek(data); if (errcode < 0) { - VGM_LOG("FFMPEG: can't init_seek, error=%i\n", errcode); - /* some formats like Smacker are so buggy that any seeking is impossible (even on video players) - * whatever, we'll just kill and reconstruct FFmpeg's config every time */ - data->force_seek = 1; - reset_ffmpeg_internal(data); /* reset state from trying to seek */ - //stream = data->formatCtx->streams[data->streamIndex]; + VGM_LOG("FFMPEG: can't init_seek, error=%i (using force_seek)\n", errcode); + ffmpeg_set_force_seek(data); } return data; @@ -547,15 +436,16 @@ static int init_ffmpeg_config(ffmpeg_codec_data * data, int target_subsong, int if (errcode < 0) goto fail; /* prepare codec and frame/packet buffers */ - data->lastDecodedFrame = av_frame_alloc(); - if (!data->lastDecodedFrame) goto fail; - av_frame_unref(data->lastDecodedFrame); - - data->lastReadPacket = av_malloc(sizeof(AVPacket)); /* av_packet_alloc? */ - if (!data->lastReadPacket) goto fail; - av_new_packet(data->lastReadPacket, 0); + data->packet = av_malloc(sizeof(AVPacket)); /* av_packet_alloc? */ + if (!data->packet) goto fail; + av_new_packet(data->packet, 0); //av_packet_unref? 
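/* ---- editor's aside, not part of the diff ----------------------------------
 * decode_ffmpeg_frame below follows FFmpeg's send/receive model: packets are
 * fed with avcodec_send_packet() and decoded frames pulled with
 * avcodec_receive_frame(); sending NULL once the demuxer hits EOF drains the
 * decoder's remaining samples. A bare-bones sketch of that loop, keeping the
 * stream-index filter but dropping most error handling (function name is
 * made up): */
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* returns 1 when `frame` holds samples, 0 when fully drained, -1 on error */
static int demo_pull_frame(AVFormatContext *fmt, AVCodecContext *dec,
                           AVPacket *pkt, AVFrame *frame, int stream_index) {
    for (;;) {
        int err = avcodec_receive_frame(dec, frame);
        if (err == 0)
            return 1;
        if (err == AVERROR_EOF)
            return 0;
        if (err != AVERROR(EAGAIN))
            return -1;

        /* decoder wants more input: read the next packet, or start draining */
        err = av_read_frame(fmt, pkt);
        if (err == AVERROR_EOF) {
            avcodec_send_packet(dec, NULL);
            continue;
        }
        if (err < 0)
            return -1;
        if (pkt->stream_index != stream_index) {
            av_packet_unref(pkt);
            continue;
        }
        avcodec_send_packet(dec, pkt);
        av_packet_unref(pkt);
    }
}
/* ---- end editor's aside --------------------------------------------------- */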
+ data->frame = av_frame_alloc(); + if (!data->frame) goto fail; + av_frame_unref(data->frame); + + return 0; fail: if (errcode < 0) @@ -563,191 +453,280 @@ fail: return -1; } +/* decodes a new frame to internal data */ +static int decode_ffmpeg_frame(ffmpeg_codec_data *data) { + int errcode; + int frame_error = 0; + + + if (data->bad_init) { + goto fail; + } + + /* ignore once file is done (but not on EOF as FFmpeg can output samples until end_of_audio) */ + if (/*data->end_of_stream ||*/ data->end_of_audio) { + VGM_LOG("FFMPEG: decode after end of audio\n"); + goto fail; + } + + + /* read data packets until valid is found */ + while (data->read_packet && !data->end_of_audio) { + if (!data->end_of_stream) { + /* reset old packet */ + av_packet_unref(data->packet); + + /* read encoded data from demuxer into packet */ + errcode = av_read_frame(data->formatCtx, data->packet); + if (errcode < 0) { + if (errcode == AVERROR_EOF) { + data->end_of_stream = 1; /* no more data to read (but may "drain" samples) */ + } + else { + VGM_LOG("FFMPEG: av_read_frame errcode=%i\n", errcode); + frame_error = 1; //goto fail; + } + + if (data->formatCtx->pb && data->formatCtx->pb->error) { + VGM_LOG("FFMPEG: pb error=%i\n", data->formatCtx->pb->error); + frame_error = 1; //goto fail; + } + } + + /* ignore non-selected streams */ + if (data->packet->stream_index != data->streamIndex) + continue; + } + + /* send encoded data to frame decoder (NULL at EOF to "drain" samples below) */ + errcode = avcodec_send_packet(data->codecCtx, data->end_of_stream ? NULL : data->packet); + if (errcode < 0) { + if (errcode != AVERROR(EAGAIN)) { + VGM_LOG("FFMPEG: avcodec_send_packet errcode=%i\n", errcode); + frame_error = 1; //goto fail; + } + } + + data->read_packet = 0; /* got data */ + } + + /* decode frame samples from sent packet or "drain" samples*/ + if (!frame_error) { + /* receive uncompressed sample data from decoded frame */ + errcode = avcodec_receive_frame(data->codecCtx, data->frame); + if (errcode < 0) { + if (errcode == AVERROR_EOF) { + data->end_of_audio = 1; /* no more audio, file is fully decoded */ + } + else if (errcode == AVERROR(EAGAIN)) { + data->read_packet = 1; /* 0 samples, request more encoded data */ + } + else { + VGM_LOG("FFMPEG: avcodec_receive_frame errcode=%i\n", errcode); + frame_error = 1;//goto fail; + } + } + } + + /* on frame_error simply uses current frame (possibly with nb_samples=0), which mirrors ffmpeg's output + * (ex. BlazBlue X360 022_btl_az.xwb) */ + + + data->samples_consumed = 0; + data->samples_filled = data->frame->nb_samples; + return 1; +fail: + return 0; +} + + +/* sample copy helpers, using different functions to minimize branches. + * + * in theory, small optimizations like *outbuf++ vs outbuf[i] or alt clamping + * would matter for performance, but in practice aren't very noticeable; + * keep it simple for now until more tests are done. + * + * in normal (interleaved) formats samples are laid out straight + * (ibuf[s*chs+ch], ex. 4ch with 8s: 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3) + * in "p" (planar) formats samples are in planes per channel + * (ibuf[ch][s], ex. 
4ch with 8s: 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3) + * + * alt float clamping: + * clamp_float(f32) + * int s16 = (int)(f32 * 32768.0f); + * if ((unsigned)(s16 + 0x8000) & 0xFFFF0000) + * s16 = (s16 >> 31) ^ 0x7FFF; + * + * when casting float to int, value is simply truncated: + * - 0.0000518798828125 * 32768.0f = 1.7f, (int)1.7 = 1, (int)-1.7 = -1 + * alts for more accurate rounding could be: + * - (int)floor(f32 * 32768.0) //not quite ok negatives + * - (int)floor(f32 * 32768.0f + 0.5f) //Xiph Vorbis style + * - (int)(f32 < 0 ? f32 - 0.5f : f + 0.5f) + * - (((int) (f1 + 32768.5)) - 32768) + * - etc + * but since +-1 isn't really audible we'll just cast as it's the fastest + */ + +static void samples_silence_s16(sample_t* obuf, int ochs, int samples) { + int s, total_samples = samples * ochs; + for (s = 0; s < total_samples; s++) { + obuf[s] = 0; /* memset'd */ + } +} + +static void samples_u8_to_s16(sample_t* obuf, uint8_t* ibuf, int ichs, int samples, int skip) { + int s, total_samples = samples * ichs; + for (s = 0; s < total_samples; s++) { + obuf[s] = ((int)ibuf[skip*ichs + s] - 0x80) << 8; + } +} +static void samples_u8p_to_s16(sample_t* obuf, uint8_t** ibuf, int ichs, int samples, int skip) { + int s, ch; + for (ch = 0; ch < ichs; ch++) { + for (s = 0; s < samples; s++) { + obuf[s*ichs + ch] = ((int)ibuf[ch][skip + s] - 0x80) << 8; + } + } +} +static void samples_s16_to_s16(sample_t* obuf, int16_t* ibuf, int ichs, int samples, int skip) { + int s, total_samples = samples * ichs; + for (s = 0; s < total_samples; s++) { + obuf[s] = ibuf[skip*ichs + s]; /* maybe should mempcy */ + } +} +static void samples_s16p_to_s16(sample_t* obuf, int16_t** ibuf, int ichs, int samples, int skip) { + int s, ch; + for (ch = 0; ch < ichs; ch++) { + for (s = 0; s < samples; s++) { + obuf[s*ichs + ch] = ibuf[ch][skip + s]; + } + } +} +static void samples_s32_to_s16(sample_t* obuf, int32_t* ibuf, int ichs, int samples, int skip) { + int s, total_samples = samples * ichs; + for (s = 0; s < total_samples; s++) { + obuf[s] = ibuf[skip*ichs + s] >> 16; + } +} +static void samples_s32p_to_s16(sample_t* obuf, int32_t** ibuf, int ichs, int samples, int skip) { + int s, ch; + for (ch = 0; ch < ichs; ch++) { + for (s = 0; s < samples; s++) { + obuf[s*ichs + ch] = ibuf[ch][skip + s] >> 16; + } + } +} +static void samples_flt_to_s16(sample_t* obuf, float* ibuf, int ichs, int samples, int skip, int invert) { + int s, total_samples = samples * ichs; + float scale = invert ? -32768.0f : 32768.0f; + for (s = 0; s < total_samples; s++) { + obuf[s] = clamp16(ibuf[skip*ichs + s] * scale); + } +} +static void samples_fltp_to_s16(sample_t* obuf, float** ibuf, int ichs, int samples, int skip, int invert) { + int s, ch; + float scale = invert ? 
-32768.0f : 32768.0f; + for (ch = 0; ch < ichs; ch++) { + for (s = 0; s < samples; s++) { + obuf[s*ichs + ch] = clamp16(ibuf[ch][skip + s] * scale); + } + } +} +static void samples_dbl_to_s16(sample_t* obuf, double* ibuf, int ichs, int samples, int skip) { + int s, total_samples = samples * ichs; + for (s = 0; s < total_samples; s++) { + obuf[s] = clamp16(ibuf[skip*ichs + s] * 32768.0); + } +} +static void samples_dblp_to_s16(sample_t* obuf, double** inbuf, int ichs, int samples, int skip) { + int s, ch; + for (ch = 0; ch < ichs; ch++) { + for (s = 0; s < samples; s++) { + obuf[s*ichs + ch] = clamp16(inbuf[ch][skip + s] * 32768.0); + } + } +} + +static void copy_samples(ffmpeg_codec_data *data, sample_t *outbuf, int samples_to_do) { + int channels = data->codecCtx->channels; + int is_planar = av_sample_fmt_is_planar(data->codecCtx->sample_fmt) && (channels > 1); + void* ibuf; + + if (is_planar) { + ibuf = data->frame->extended_data; + } + else { + ibuf = data->frame->data[0]; + } + + switch (data->codecCtx->sample_fmt) { + /* unused? */ + case AV_SAMPLE_FMT_U8P: if (is_planar) { samples_u8p_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; } + case AV_SAMPLE_FMT_U8: samples_u8_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; + /* common */ + case AV_SAMPLE_FMT_S16P: if (is_planar) { samples_s16p_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; } + case AV_SAMPLE_FMT_S16: samples_s16_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; + /* possibly FLAC and other lossless codecs */ + case AV_SAMPLE_FMT_S32P: if (is_planar) { samples_s32p_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; } + case AV_SAMPLE_FMT_S32: samples_s32_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; + /* mainly MDCT-like codecs (Ogg, AAC, etc) */ + case AV_SAMPLE_FMT_FLTP: if (is_planar) { samples_fltp_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed, data->invert_floats_set); break; } + case AV_SAMPLE_FMT_FLT: samples_flt_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed, data->invert_floats_set); break; + /* possibly PCM64 only (not enabled) */ + case AV_SAMPLE_FMT_DBLP: if (is_planar) { samples_dblp_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; } + case AV_SAMPLE_FMT_DBL: samples_dbl_to_s16(outbuf, ibuf, channels, samples_to_do, data->samples_consumed); break; + default: + break; + } + + if (data->channel_remap_set) + remap_audio(outbuf, samples_to_do, channels, data->channel_remap); +} /* decode samples of any kind of FFmpeg format */ void decode_ffmpeg(VGMSTREAM *vgmstream, sample_t * outbuf, int32_t samples_to_do, int channels) { ffmpeg_codec_data *data = vgmstream->codec_data; - int samplesReadNow; - //todo use either channels / data->channels / codecCtx->channels - - AVFormatContext *formatCtx = data->formatCtx; - AVCodecContext *codecCtx = data->codecCtx; - AVPacket *packet = data->lastReadPacket; - AVFrame *frame = data->lastDecodedFrame; - - int readNextPacket = data->readNextPacket; - int endOfStream = data->endOfStream; - int endOfAudio = data->endOfAudio; - int bytesConsumedFromDecodedFrame = data->bytesConsumedFromDecodedFrame; - - int planar = 0; - int bytesPerSample = data->bitsPerSample / 8; - int bytesRead, bytesToRead; - if (data->bad_init) { - memset(outbuf, 0, samples_to_do * channels * sizeof(sample)); - return; - } + while (samples_to_do > 0) { - /* ignore 
once file is done (but not at endOfStream as FFmpeg can still output samples until endOfAudio) */ - if (/*endOfStream ||*/ endOfAudio) { - VGM_LOG("FFMPEG: decode after end of audio\n"); - memset(outbuf, 0, samples_to_do * channels * sizeof(sample)); - return; - } + if (data->samples_consumed < data->samples_filled) { + /* consume samples */ + int samples_to_get = (data->samples_filled - data->samples_consumed); - planar = av_sample_fmt_is_planar(codecCtx->sample_fmt); - bytesRead = 0; - bytesToRead = samples_to_do * (bytesPerSample * codecCtx->channels); - - - /* keep reading and decoding packets until the requested number of samples (in bytes for FFmpeg calcs) */ - while (bytesRead < bytesToRead) { - int dataSize, toConsume, errcode; - - /* get sample data size from current frame (dataSize will be < 0 when nb_samples = 0) */ - dataSize = av_samples_get_buffer_size(NULL, codecCtx->channels, frame->nb_samples, codecCtx->sample_fmt, 1); - if (dataSize < 0) - dataSize = 0; - - /* read new data packet when requested */ - while (readNextPacket && !endOfAudio) { - if (!endOfStream) { - /* reset old packet */ - av_packet_unref(packet); - - /* get compressed data from demuxer into packet */ - errcode = av_read_frame(formatCtx, packet); - if (errcode < 0) { - if (errcode == AVERROR_EOF) { - endOfStream = 1; /* no more data, but may still output samples */ - } - else { - VGM_LOG("FFMPEG: av_read_frame errcode %i\n", errcode); - } - - if (formatCtx->pb && formatCtx->pb->error) { - break; - } - } - - if (packet->stream_index != data->streamIndex) - continue; /* ignore non-selected streams */ - } - - /* send compressed data to decoder in packet (NULL at EOF to "drain") */ - errcode = avcodec_send_packet(codecCtx, endOfStream ? NULL : packet); - if (errcode < 0) { - if (errcode != AVERROR(EAGAIN)) { - VGM_LOG("FFMPEG: avcodec_send_packet errcode %i\n", errcode); - goto end; - } - } - - readNextPacket = 0; /* got compressed data */ - } - - /* decode packet into frame's sample data (if we don't have bytes to consume from previous frame) */ - if (dataSize <= bytesConsumedFromDecodedFrame) { - if (endOfAudio) { - break; - } - - bytesConsumedFromDecodedFrame = 0; - - /* receive uncompressed sample data from decoder in frame */ - errcode = avcodec_receive_frame(codecCtx, frame); - if (errcode < 0) { - if (errcode == AVERROR_EOF) { - endOfAudio = 1; /* no more samples, file is fully decoded */ - break; - } - else if (errcode == AVERROR(EAGAIN)) { - readNextPacket = 1; /* request more compressed data */ - continue; - } - else { - VGM_LOG("FFMPEG: avcodec_receive_frame errcode %i\n", errcode); - goto end; - } - } - - /* get sample data size of current frame */ - dataSize = av_samples_get_buffer_size(NULL, codecCtx->channels, frame->nb_samples, codecCtx->sample_fmt, 1); - if (dataSize < 0) - dataSize = 0; - } - - toConsume = FFMIN((dataSize - bytesConsumedFromDecodedFrame), (bytesToRead - bytesRead)); - - - /* discard decoded frame if needed (fully or partially) */ - if (data->samplesToDiscard) { - int samplesDataSize = dataSize / (bytesPerSample * channels); - - if (data->samplesToDiscard >= samplesDataSize) { - /* discard all of the frame's samples and continue to the next */ - bytesConsumedFromDecodedFrame = dataSize; - data->samplesToDiscard -= samplesDataSize; - continue; + if (data->samples_discard) { + /* discard samples for looping */ + if (samples_to_get > data->samples_discard) + samples_to_get = data->samples_discard; + data->samples_discard -= samples_to_get; } else { - /* discard part of the frame 
and copy the rest below */ - int bytesToDiscard = data->samplesToDiscard * (bytesPerSample * channels); - int dataSizeLeft = dataSize - bytesToDiscard; + /* get max samples and copy */ + if (samples_to_get > samples_to_do) + samples_to_get = samples_to_do; - bytesConsumedFromDecodedFrame += bytesToDiscard; - data->samplesToDiscard = 0; - if (toConsume > dataSizeLeft) - toConsume = dataSizeLeft; + copy_samples(data, outbuf, samples_to_get); + + //samples_done += samples_to_get; + samples_to_do -= samples_to_get; + outbuf += samples_to_get * channels; } - } - - /* copy decoded sample data to buffer */ - if (!planar || channels == 1) { /* 1 sample per channel, already mixed */ - memmove(data->sampleBuffer + bytesRead, (frame->data[0] + bytesConsumedFromDecodedFrame), toConsume); + /* mark consumed samples */ + data->samples_consumed += samples_to_get; } - else { /* N samples per channel, mix to 1 sample per channel */ - uint8_t * out = (uint8_t *) data->sampleBuffer + bytesRead; - int bytesConsumedPerPlane = bytesConsumedFromDecodedFrame / channels; - int toConsumePerPlane = toConsume / channels; - int s, ch; - for (s = 0; s < toConsumePerPlane; s += bytesPerSample) { - for (ch = 0; ch < channels; ++ch) { - memcpy(out, frame->extended_data[ch] + bytesConsumedPerPlane + s, bytesPerSample); - out += bytesPerSample; - } - } + else { + int ok = decode_ffmpeg_frame(data); + if (!ok) goto decode_fail; } - - /* consume */ - bytesConsumedFromDecodedFrame += toConsume; - bytesRead += toConsume; } + return; -end: - /* convert native sample format into PCM16 outbuf */ - samplesReadNow = bytesRead / (bytesPerSample * channels); - convert_audio_pcm16(outbuf, data->sampleBuffer, samplesReadNow * channels, data->bitsPerSample, data->floatingPoint); - if (data->channel_remap_set) - remap_audio(outbuf, samplesReadNow, data->channels, data->channel_remap); - if (data->invert_audio_set) - invert_audio(outbuf, samplesReadNow, data->channels); - - /* clean buffer when requested more samples than possible */ - if (endOfAudio && samplesReadNow < samples_to_do) { - VGM_LOG("FFMPEG: decode after end of audio %i samples\n", (samples_to_do - samplesReadNow)); - memset(outbuf + (samplesReadNow * channels), 0, (samples_to_do - samplesReadNow) * channels * sizeof(sample)); - } - - /* copy state back */ - data->readNextPacket = readNextPacket; - data->endOfStream = endOfStream; - data->endOfAudio = endOfAudio; - data->bytesConsumedFromDecodedFrame = bytesConsumedFromDecodedFrame; +decode_fail: + VGM_LOG("FFMPEG: decode fail, missing %i samples\n", samples_to_do); + samples_silence_s16(outbuf, channels, samples_to_do); } @@ -766,7 +745,7 @@ void seek_ffmpeg_internal(ffmpeg_codec_data *data, int32_t num_sample) { if (!data) return; /* Start from 0 and discard samples until sample (slower but not too noticeable). - * Due to various FFmpeg quirks seeking to a sample is erratic in many formats (would need extra steps). */ + * Due to many FFmpeg quirks seeking to a sample is erratic at best in most formats. 
*/ if (data->force_seek) { int errcode; @@ -787,21 +766,22 @@ void seek_ffmpeg_internal(ffmpeg_codec_data *data, int32_t num_sample) { avcodec_flush_buffers(data->codecCtx); } - data->samplesToDiscard = num_sample; + data->samples_consumed = 0; + data->samples_filled = 0; + data->samples_discard = num_sample; - data->readNextPacket = 1; - data->bytesConsumedFromDecodedFrame = INT_MAX; - data->endOfStream = 0; - data->endOfAudio = 0; + data->read_packet = 1; + data->end_of_stream = 0; + data->end_of_audio = 0; /* consider skip samples (encoder delay), if manually set (otherwise let FFmpeg handle it) */ - if (data->skipSamplesSet) { + if (data->skip_samples_set) { AVStream *stream = data->formatCtx->streams[data->streamIndex]; /* sometimes (ex. AAC) after seeking to the first packet skip_samples is restored, but we want our value */ stream->skip_samples = 0; stream->start_skip_samples = 0; - data->samplesToDiscard += data->skipSamples; + data->samples_discard += data->skipSamples; } return; @@ -819,15 +799,15 @@ static void free_ffmpeg_config(ffmpeg_codec_data *data) { if (data == NULL) return; - if (data->lastReadPacket) { - av_packet_unref(data->lastReadPacket); - av_free(data->lastReadPacket); - data->lastReadPacket = NULL; + if (data->packet) { + av_packet_unref(data->packet); + av_free(data->packet); + data->packet = NULL; } - if (data->lastDecodedFrame) { - av_frame_unref(data->lastDecodedFrame); - av_free(data->lastDecodedFrame); - data->lastDecodedFrame = NULL; + if (data->frame) { + av_frame_unref(data->frame); + av_free(data->frame); + data->frame = NULL; } if (data->codecCtx) { avcodec_close(data->codecCtx); @@ -841,7 +821,7 @@ static void free_ffmpeg_config(ffmpeg_codec_data *data) { } if (data->ioCtx) { /* buffer passed in is occasionally freed and replaced. - // the replacement must be free'd as well (below) */ + * the replacement must be free'd as well (below) */ data->buffer = data->ioCtx->buffer; avio_context_free(&data->ioCtx); //av_free(data->ioCtx); /* done in context_free (same thing) */ @@ -852,7 +832,7 @@ static void free_ffmpeg_config(ffmpeg_codec_data *data) { data->buffer = NULL; } - //todo avformat_find_stream_info may cause some Win Handle leaks? related to certain option (not happening in gcc builds) + //todo avformat_find_stream_info may cause some Win Handle leaks? 
related to certain option } void free_ffmpeg(ffmpeg_codec_data *data) { @@ -861,13 +841,9 @@ void free_ffmpeg(ffmpeg_codec_data *data) { free_ffmpeg_config(data); - if (data->sampleBuffer) { - av_free(data->sampleBuffer); - data->sampleBuffer = NULL; - } - if (data->header_insert_block) { - av_free(data->header_insert_block); - data->header_insert_block = NULL; + if (data->header_block) { + av_free(data->header_block); + data->header_block = NULL; } close_streamfile(data->streamfile); @@ -895,8 +871,8 @@ void ffmpeg_set_skip_samples(ffmpeg_codec_data * data, int skip_samples) { stream->skip_samples = 0; /* skip_samples can be used for any packet */ /* set skip samples with our internal discard */ - data->skipSamplesSet = 1; - data->samplesToDiscard = skip_samples; + data->skip_samples_set = 1; + data->samples_discard = skip_samples; /* expose (info only) */ data->skipSamples = skip_samples; @@ -923,4 +899,24 @@ void ffmpeg_set_channel_remapping(ffmpeg_codec_data * data, int *channel_remap) data->channel_remap_set = 1; } +const char* ffmpeg_get_codec_name(ffmpeg_codec_data * data) { + if (!data || !data->codec) + return NULL; + if (data->codec->long_name) + return data->codec->long_name; + if (data->codec->name) + return data->codec->name; + return NULL; +} + +void ffmpeg_set_force_seek(ffmpeg_codec_data * data) { + /* some formats like Smacker are so buggy that any seeking is impossible (even on video players), + * or MPC with an incorrectly parsed seek table (using as 0 some non-0 seek offset). + * whatever, we'll just kill and reconstruct FFmpeg's config every time */ + ;VGM_LOG("1\n"); + data->force_seek = 1; + reset_ffmpeg_internal(data); /* reset state from trying to seek */ + //stream = data->formatCtx->streams[data->streamIndex]; +} + #endif diff --git a/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder_utils.c b/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder_utils.c index 83a94c22c..3b6743104 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder_utils.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/ffmpeg_decoder_utils.c @@ -66,7 +66,7 @@ ffmpeg_codec_data * init_ffmpeg_atrac3_raw(STREAMFILE *sf, off_t offset, size_t /* invert ATRAC3: waveform is inverted vs official tools (not noticeable but for accuracy) */ if (is_at3) { - ffmpeg_data->invert_audio_set = 1; + ffmpeg_data->invert_floats_set = 1; } return ffmpeg_data; @@ -159,7 +159,7 @@ ffmpeg_codec_data * init_ffmpeg_atrac3_riff(STREAMFILE *sf, off_t offset, int* o /* invert ATRAC3: waveform is inverted vs official tools (not noticeable but for accuracy) */ if (is_at3) { - ffmpeg_data->invert_audio_set = 1; + ffmpeg_data->invert_floats_set = 1; } /* multichannel fix: LFE channel should be reordered on decode (ATRAC3Plus only, only 1/2/6/8ch exist): diff --git a/Frameworks/vgmstream/vgmstream/src/coding/ima_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/ima_decoder.c index ba82ac8ce..c58327565 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/ima_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/ima_decoder.c @@ -1124,11 +1124,14 @@ size_t ms_ima_bytes_to_samples(size_t bytes, int block_align, int channels) { } size_t xbox_ima_bytes_to_samples(size_t bytes, int channels) { + int mod; int block_align = 0x24 * channels; if (channels <= 0) return 0; + + mod = bytes % block_align; /* XBOX IMA blocks have a 4 byte header per channel; 2 samples per byte (2 nibbles) */ return (bytes / block_align) * (block_align - 4 * channels) * 2 / channels - + ((bytes % block_align) ? 
((bytes % block_align) - 4 * channels) * 2 / channels : 0); /* unlikely (encoder aligns) */ + + ((mod > 0 && mod > 0x04*channels) ? (mod - 0x04*channels) * 2 / channels : 0); /* unlikely (encoder aligns) */ } size_t dat4_ima_bytes_to_samples(size_t bytes, int channels) { diff --git a/Frameworks/vgmstream/vgmstream/src/coding/ngc_dsp_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/ngc_dsp_decoder.c index 773e96125..6f7fe22cc 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/ngc_dsp_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/ngc_dsp_decoder.c @@ -1,69 +1,103 @@ #include "coding.h" #include "../util.h" + void decode_ngc_dsp(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do) { - int i=first_sample; - int32_t sample_count; - - int framesin = first_sample/14; - - int8_t header = read_8bit(framesin*8+stream->offset,stream->streamfile); - int32_t scale = 1 << (header & 0xf); - int coef_index = (header >> 4) & 0xf; + uint8_t frame[0x08] = {0}; + off_t frame_offset; + int i, frames_in, sample_count = 0; + size_t bytes_per_frame, samples_per_frame; + int coef_index, scale, coef1, coef2; int32_t hist1 = stream->adpcm_history1_16; int32_t hist2 = stream->adpcm_history2_16; - int coef1 = stream->adpcm_coef[coef_index*2]; - int coef2 = stream->adpcm_coef[coef_index*2+1]; - first_sample = first_sample%14; - for (i=first_sample,sample_count=0; ioffset+1+i/2,stream->streamfile); + /* external interleave (fixed size), mono */ + bytes_per_frame = 0x08; + samples_per_frame = (bytes_per_frame - 0x01) * 2; /* always 14 */ + frames_in = first_sample / samples_per_frame; + first_sample = first_sample % samples_per_frame; - outbuf[sample_count] = clamp16(( - (((i&1? - get_low_nibble_signed(sample_byte): - get_high_nibble_signed(sample_byte) - ) * scale)<<11) + 1024 + - (coef1 * hist1 + coef2 * hist2))>>11 - ); + /* parse frame header */ + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ + scale = 1 << ((frame[0] >> 0) & 0xf); + coef_index = (frame[0] >> 4) & 0xf; + + VGM_ASSERT_ONCE(coef_index > 8, "DSP: incorrect coefs at %x\n", (uint32_t)frame_offset); + //if (coef_index > 8) //todo not correctly clamped in original decoder? + // coef_index = 8; + + coef1 = stream->adpcm_coef[coef_index*2 + 0]; + coef2 = stream->adpcm_coef[coef_index*2 + 1]; + + + /* decode nibbles */ + for (i = first_sample; i < first_sample + samples_to_do; i++) { + int32_t sample = 0; + uint8_t nibbles = frame[0x01 + i/2]; + + sample = i&1 ? 
/* high nibble first */ + get_low_nibble_signed(nibbles) : + get_high_nibble_signed(nibbles); + sample = ((sample * scale) << 11); + sample = (sample + 1024 + coef1*hist1 + coef2*hist2) >> 11; + sample = clamp16(sample); + + outbuf[sample_count] = sample; + sample_count += channelspacing; hist2 = hist1; - hist1 = outbuf[sample_count]; + hist1 = sample; } stream->adpcm_history1_16 = hist1; stream->adpcm_history2_16 = hist2; } -/* read from memory rather than a file */ -static void decode_ngc_dsp_subint_internal(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, uint8_t * mem) { - int i=first_sample; - int32_t sample_count; - int8_t header = mem[0]; - int32_t scale = 1 << (header & 0xf); - int coef_index = (header >> 4) & 0xf; +/* read from memory rather than a file */ +static void decode_ngc_dsp_subint_internal(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, uint8_t * frame) { + int i, sample_count = 0; + size_t bytes_per_frame, samples_per_frame; + int coef_index, scale, coef1, coef2; int32_t hist1 = stream->adpcm_history1_16; int32_t hist2 = stream->adpcm_history2_16; - int coef1 = stream->adpcm_coef[coef_index*2]; - int coef2 = stream->adpcm_coef[coef_index*2+1]; - first_sample = first_sample%14; - for (i=first_sample,sample_count=0; i samples_per_frame, "DSP: layout error, too many samples\n"); - outbuf[sample_count] = clamp16(( - (((i&1? - get_low_nibble_signed(sample_byte): - get_high_nibble_signed(sample_byte) - ) * scale)<<11) + 1024 + - (coef1 * hist1 + coef2 * hist2))>>11 - ); + /* parse frame header */ + scale = 1 << ((frame[0] >> 0) & 0xf); + coef_index = (frame[0] >> 4) & 0xf; + + VGM_ASSERT_ONCE(coef_index > 8, "DSP: incorrect coefs\n"); + //if (coef_index > 8) //todo not correctly clamped in original decoder? + // coef_index = 8; + + coef1 = stream->adpcm_coef[coef_index*2 + 0]; + coef2 = stream->adpcm_coef[coef_index*2 + 1]; + + for (i = first_sample; i < first_sample + samples_to_do; i++) { + int32_t sample = 0; + uint8_t nibbles = frame[0x01 + i/2]; + + sample = i&1 ? + get_low_nibble_signed(nibbles) : + get_high_nibble_signed(nibbles); + sample = ((sample * scale) << 11); + sample = (sample + 1024 + coef1*hist1 + coef2*hist2) >> 11; + sample = clamp16(sample); + + outbuf[sample_count] = sample; + sample_count += channelspacing; hist2 = hist1; - hist1 = outbuf[sample_count]; + hist1 = sample; } stream->adpcm_history1_16 = hist1; @@ -72,22 +106,21 @@ static void decode_ngc_dsp_subint_internal(VGMSTREAMCHANNEL * stream, sample_t * /* decode DSP with byte-interleaved frames (ex. 
0x08: 1122112211221122) */ void decode_ngc_dsp_subint(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel, int interleave) { - uint8_t sample_data[0x08]; + uint8_t frame[0x08]; int i; + int frames_in = first_sample / 14; - int framesin = first_sample/14; - - for (i=0; i < 0x08; i++) { + for (i = 0; i < 0x08; i++) { /* base + current frame + subint section + subint byte + channel adjust */ - sample_data[i] = read_8bit( + frame[i] = read_8bit( stream->offset - + framesin*(0x08*channelspacing) + + frames_in*(0x08*channelspacing) + i/interleave * interleave * channelspacing + i%interleave + interleave * channel, stream->streamfile); } - decode_ngc_dsp_subint_internal(stream, outbuf, channelspacing, first_sample, samples_to_do, sample_data); + decode_ngc_dsp_subint_internal(stream, outbuf, channelspacing, first_sample, samples_to_do, frame); } diff --git a/Frameworks/vgmstream/vgmstream/src/coding/psv_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/psv_decoder.c index 7136c837a..a6d1b9414 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/psv_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/psv_decoder.c @@ -3,7 +3,7 @@ #include "../util.h" /* PSVita ADPCM table */ -static const int16_t HEVAG_coefs[128][4] = { +static const int16_t hevag_coefs[128][4] = { { 0, 0, 0, 0 }, { 7680, 0, 0, 0 }, { 14720, -6656, 0, 0 }, @@ -141,59 +141,58 @@ static const int16_t HEVAG_coefs[128][4] = { * * Original research and algorithm by id-daemon / daemon1. */ -void decode_hevag(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do) { - - uint8_t predict_nr, shift, flag, byte; - int32_t scale = 0; - - int32_t sample; +void decode_hevag(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do) { + uint8_t frame[0x10] = {0}; + off_t frame_offset; + int i, frames_in, sample_count = 0; + size_t bytes_per_frame, samples_per_frame; + int coef_index, shift_factor, flag; int32_t hist1 = stream->adpcm_history1_32; int32_t hist2 = stream->adpcm_history2_32; int32_t hist3 = stream->adpcm_history3_32; int32_t hist4 = stream->adpcm_history4_32; - int i, sample_count; + /* external interleave (fixed size), mono */ + bytes_per_frame = 0x10; + samples_per_frame = (bytes_per_frame - 0x02) * 2; /* always 28 */ + frames_in = first_sample / samples_per_frame; + first_sample = first_sample % samples_per_frame; - int framesin = first_sample / 28; + /* parse frame header */ + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ + coef_index = (frame[0] >> 4) & 0xf; + shift_factor = (frame[0] >> 0) & 0xf; + coef_index = ((frame[1] >> 0) & 0xf0) | coef_index; + flag = (frame[1] >> 0) & 0xf; /* same flags */ - /* 4 byte header: predictor = 3rd and 1st, shift = 2nd, flag = 4th */ - byte = (uint8_t)read_8bit(stream->offset+framesin*16+0,stream->streamfile); - predict_nr = byte >> 4; - shift = byte & 0x0f; - byte = (uint8_t)read_8bit(stream->offset+framesin*16+1,stream->streamfile); - predict_nr = (byte & 0xF0) | predict_nr; - flag = byte & 0x0f; /* no change in flags */ + VGM_ASSERT_ONCE(coef_index > 127 || shift_factor > 12, "HEVAG: incorrect coefs/shift at %x\n", (uint32_t)frame_offset); + if (coef_index > 127) + coef_index = 127; /* ? */ + if (shift_factor > 12) + shift_factor = 9; /* ?
*/ - first_sample = first_sample % 28; + /* decode nibbles */ + for (i = first_sample; i < first_sample + samples_to_do; i++) { + int32_t sample = 0, scale = 0; - if (first_sample & 1) { /* if first sample is odd, read byte first */ - byte = read_8bit(stream->offset+(framesin*16)+2+first_sample/2,stream->streamfile); - } + if (flag < 0x07) { /* with flag 0x07 decoded sample must be 0 */ + uint8_t nibbles = frame[0x02 + i/2]; - for (i = first_sample, sample_count = 0; i < first_sample + samples_to_do; i++, sample_count += channelspacing) { - sample = 0; - - if (flag < 7 && predict_nr < 128) { - - if (i & 1) {/* odd/even nibble */ - scale = byte >> 4; - } else { - byte = read_8bit(stream->offset+(framesin*16)+2+i/2,stream->streamfile); - scale = byte & 0x0f; - } - if (scale > 7) { /* sign extend */ - scale = scale - 16; - } - - sample = (hist1 * HEVAG_coefs[predict_nr][0] + - hist2 * HEVAG_coefs[predict_nr][1] + - hist3 * HEVAG_coefs[predict_nr][2] + - hist4 * HEVAG_coefs[predict_nr][3] ) / 32; - sample = (sample + (scale << (20 - shift)) + 128) >> 8; + scale = i&1 ? /* low nibble first */ + get_high_nibble_signed(nibbles): + get_low_nibble_signed(nibbles); + sample = (hist1 * hevag_coefs[coef_index][0] + + hist2 * hevag_coefs[coef_index][1] + + hist3 * hevag_coefs[coef_index][2] + + hist4 * hevag_coefs[coef_index][3] ) / 32; + sample = (sample + (scale << (20 - shift_factor)) + 128) >> 8; } - outbuf[sample_count] = clamp16(sample); + outbuf[sample_count] = sample; + sample_count += channelspacing; + hist4 = hist3; hist3 = hist2; hist2 = hist1; diff --git a/Frameworks/vgmstream/vgmstream/src/coding/psx_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/psx_decoder.c index f5e7ebd41..1107d0974 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/psx_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/psx_decoder.c @@ -2,7 +2,7 @@ /* PS-ADPCM table, defined as rational numbers (as in the spec) */ -static const double ps_adpcm_coefs_f[5][2] = { +static const float ps_adpcm_coefs_f[5][2] = { { 0.0 , 0.0 }, //{ 0.0 , 0.0 }, { 0.9375 , 0.0 }, //{ 60.0 / 64.0 , 0.0 }, { 1.796875 , -0.8125 }, //{ 115.0 / 64.0 , -52.0 / 64.0 }, @@ -44,6 +44,7 @@ static const int ps_adpcm_coefs_i[5][2] = { /* standard PS-ADPCM (float math version) */ void decode_psx(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int is_badflags) { + uint8_t frame[0x10] = {0}; off_t frame_offset; int i, frames_in, sample_count = 0; size_t bytes_per_frame, samples_per_frame; @@ -51,6 +52,7 @@ void decode_psx(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing int32_t hist1 = stream->adpcm_history1_32; int32_t hist2 = stream->adpcm_history2_32; + /* external interleave (fixed size), mono */ bytes_per_frame = 0x10; samples_per_frame = (bytes_per_frame - 0x02) * 2; /* always 28 */ @@ -58,10 +60,11 @@ void decode_psx(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing first_sample = first_sample % samples_per_frame; /* parse frame header */ - frame_offset = stream->offset + bytes_per_frame*frames_in; - coef_index = ((uint8_t)read_8bit(frame_offset+0x00,stream->streamfile) >> 4) & 0xf; - shift_factor = ((uint8_t)read_8bit(frame_offset+0x00,stream->streamfile) >> 0) & 0xf; - flag = (uint8_t)read_8bit(frame_offset+0x01,stream->streamfile); /* only lower nibble needed */ + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ + coef_index 
= (frame[0] >> 4) & 0xf; + shift_factor = (frame[0] >> 0) & 0xf; + flag = frame[1]; /* only lower nibble needed */ VGM_ASSERT_ONCE(coef_index > 5 || shift_factor > 12, "PS-ADPCM: incorrect coefs/shift at %x\n", (uint32_t)frame_offset); if (coef_index > 5) /* needed by inFamous (PS3) (maybe it's supposed to use more filters?) */ @@ -73,18 +76,19 @@ void decode_psx(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing flag = 0; VGM_ASSERT_ONCE(flag > 7,"PS-ADPCM: unknown flag at %x\n", (uint32_t)frame_offset); /* meta should use PSX-badflags */ + /* decode nibbles */ for (i = first_sample; i < first_sample + samples_to_do; i++) { int32_t sample = 0; if (flag < 0x07) { /* with flag 0x07 decoded sample must be 0 */ - uint8_t nibbles = (uint8_t)read_8bit(frame_offset+0x02+i/2,stream->streamfile); + uint8_t nibbles = frame[0x02 + i/2]; sample = i&1 ? /* low nibble first */ (nibbles >> 4) & 0x0f : (nibbles >> 0) & 0x0f; sample = (int16_t)((sample << 12) & 0xf000) >> shift_factor; /* 16b sign extend + scale */ - sample = (int)(sample + ps_adpcm_coefs_f[coef_index][0]*hist1 + ps_adpcm_coefs_f[coef_index][1]*hist2); + sample = (int32_t)(sample + ps_adpcm_coefs_f[coef_index][0]*hist1 + ps_adpcm_coefs_f[coef_index][1]*hist2); sample = clamp16(sample); } @@ -105,6 +109,7 @@ void decode_psx(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing * * Uses int math to decode, which seems more likely (based on FF XI PC's code in Moogle Toolbox). */ void decode_psx_configurable(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int frame_size) { + uint8_t frame[0x50] = {0}; off_t frame_offset; int i, frames_in, sample_count = 0; size_t bytes_per_frame, samples_per_frame; @@ -112,6 +117,7 @@ void decode_psx_configurable(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int c int32_t hist1 = stream->adpcm_history1_32; int32_t hist2 = stream->adpcm_history2_32; + /* external interleave (variable size), mono */ bytes_per_frame = frame_size; samples_per_frame = (bytes_per_frame - 0x01) * 2; @@ -119,9 +125,10 @@ void decode_psx_configurable(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int c first_sample = first_sample % samples_per_frame; /* parse frame header */ - frame_offset = stream->offset + bytes_per_frame*frames_in; - coef_index = ((uint8_t)read_8bit(frame_offset+0x00,stream->streamfile) >> 4) & 0xf; - shift_factor = ((uint8_t)read_8bit(frame_offset+0x00,stream->streamfile) >> 0) & 0xf; + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ + coef_index = (frame[0] >> 4) & 0xf; + shift_factor = (frame[0] >> 0) & 0xf; VGM_ASSERT_ONCE(coef_index > 5 || shift_factor > 12, "PS-ADPCM: incorrect coefs/shift at %x\n", (uint32_t)frame_offset); if (coef_index > 5) /* needed by Afrika (PS3) (maybe it's supposed to use more filters?) */ @@ -129,10 +136,11 @@ void decode_psx_configurable(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int c if (shift_factor > 12) shift_factor = 9; /* supposedly, from Nocash PSX docs */ + /* decode nibbles */ for (i = first_sample; i < first_sample + samples_to_do; i++) { int32_t sample = 0; - uint8_t nibbles = (uint8_t)read_8bit(frame_offset+0x01+i/2,stream->streamfile); + uint8_t nibbles = frame[0x01 + i/2]; sample = i&1 ? 
/* low nibble first */ (nibbles >> 4) & 0x0f : @@ -154,6 +162,7 @@ void decode_psx_configurable(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int c /* PS-ADPCM from Pivotal games, exactly like psx_cfg but with float math (reverse engineered from the exe) */ void decode_psx_pivotal(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int frame_size) { + uint8_t frame[0x50] = {0}; off_t frame_offset; int i, frames_in, sample_count = 0; size_t bytes_per_frame, samples_per_frame; @@ -162,6 +171,7 @@ void decode_psx_pivotal(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channe int32_t hist2 = stream->adpcm_history2_32; float scale; + /* external interleave (variable size), mono */ bytes_per_frame = frame_size; samples_per_frame = (bytes_per_frame - 0x01) * 2; @@ -169,21 +179,24 @@ void decode_psx_pivotal(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channe first_sample = first_sample % samples_per_frame; /* parse frame header */ - frame_offset = stream->offset + bytes_per_frame*frames_in; - coef_index = ((uint8_t)read_8bit(frame_offset+0x00,stream->streamfile) >> 4) & 0xf; - shift_factor = ((uint8_t)read_8bit(frame_offset+0x00,stream->streamfile) >> 0) & 0xf; + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ + coef_index = (frame[0] >> 4) & 0xf; + shift_factor = (frame[0] >> 0) & 0xf; - VGM_ASSERT_ONCE(coef_index > 5 || shift_factor > 12, "PS-ADPCM: incorrect coefs/shift at %x\n", (uint32_t)frame_offset); + VGM_ASSERT_ONCE(coef_index > 5 || shift_factor > 12, "PS-ADPCM-piv: incorrect coefs/shift\n"); if (coef_index > 5) /* just in case */ coef_index = 5; if (shift_factor > 12) /* same */ shift_factor = 12; + scale = (float)(1.0 / (double)(1 << shift_factor)); + /* decode nibbles */ for (i = first_sample; i < first_sample + samples_to_do; i++) { int32_t sample = 0; - uint8_t nibbles = (uint8_t)read_8bit(frame_offset+0x01+i/2,stream->streamfile); + uint8_t nibbles = frame[0x01 + i/2]; sample = !(i&1) ? /* low nibble first */ (nibbles >> 0) & 0x0f : diff --git a/Frameworks/vgmstream/vgmstream/src/coding/xa_decoder.c b/Frameworks/vgmstream/vgmstream/src/coding/xa_decoder.c index 58c3c21a0..a19bad611 100644 --- a/Frameworks/vgmstream/vgmstream/src/coding/xa_decoder.c +++ b/Frameworks/vgmstream/vgmstream/src/coding/xa_decoder.c @@ -6,11 +6,13 @@ // May be implemented like the SNES/SPC700 BRR. /* XA ADPCM gain values */ -static const double K0[4] = { 0.0, 0.9375, 1.796875, 1.53125 }; -static const double K1[4] = { 0.0, 0.0, -0.8125,-0.859375}; -/* K0/1 floats to int, K*2^10 = K*(1<<10) = K*1024 */ -static int get_IK0(int fid) { return ((int)((-K0[fid]) * (1 << 10))); } -static int get_IK1(int fid) { return ((int)((-K1[fid]) * (1 << 10))); } +#if 0 +static const float K0[4] = { 0.0, 0.9375, 1.796875, 1.53125 }; +static const float K1[4] = { 0.0, 0.0, -0.8125, -0.859375 }; +#endif +/* K0/1 floats to int, -K*2^10 = -K*(1<<10) = -K*1024 */ +static const int IK0[4] = { 0, -960, -1840, -1568 }; +static const int IK1[4] = { 0, 0, 832, 880 }; /* Sony XA ADPCM, defined for CD-DA/CD-i in the "Red Book" (private) or "Green Book" (public) specs. * The algorithm basically is BRR (Bit Rate Reduction) from the SNES SPC700, while the data layout is new. 
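/* Illustrative aside, not part of the patch: a tiny standalone check that the new
 * precomputed IK0/IK1 tables above really are -K*(1<<10) of the old float gains,
 * i.e. exactly what the removed get_IK0()/get_IK1() helpers computed at runtime
 * (all K are n/64, so the products are exact integers). */
#include <stdio.h>

int main(void) {
    static const double K0[4] = { 0.0, 0.9375, 1.796875, 1.53125 };
    static const double K1[4] = { 0.0, 0.0, -0.8125, -0.859375 };
    static const int IK0[4] = { 0, -960, -1840, -1568 };
    static const int IK1[4] = { 0, 0, 832, 880 };
    int fid;

    for (fid = 0; fid < 4; fid++) {
        /* prints matching pairs for all 4 filters */
        printf("filter %i: IK0 %i (expected %i), IK1 %i (expected %i)\n", fid,
                IK0[fid], (int)(-K0[fid] * (1 << 10)),
                IK1[fid], (int)(-K1[fid] * (1 << 10)));
    }
    return 0;
}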
@@ -35,23 +37,22 @@ static int get_IK1(int fid) { return ((int)((-K1[fid]) * (1 << 10))); } * int coef tables commonly use N = 6 or 8, so K0 0.9375*64 = 60 or 0.9375*256 = 240 * PS1 XA is apparently upsampled and interpolated to 44100, vgmstream doesn't simulate this. * + * XA has an 8-bit decoding and "emphasis" modes, that no PS1 game actually uses, but apparently + * are supported by the CD hardware and will play if found. + * * Info (Green Book): https://www.lscdweb.com/data/downloadables/2/8/cdi_may94_r2.pdf * BRR info (no$sns): http://problemkaputt.de/fullsnes.htm#snesapudspbrrsamples - * (bsnes): https://gitlab.com/higan/higan/blob/master/higan/sfc/dsp/brr.cpp + * (bsnes): https://github.com/byuu/bsnes/blob/master/bsnes/sfc/dsp/SPC_DSP.cpp#L316 */ -void decode_xa(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel) { - off_t frame_offset, sp_offset; - int i,j, frames_in, samples_done = 0, sample_count = 0; +void decode_xa(VGMSTREAMCHANNEL * stream, sample_t * outbuf, int channelspacing, int32_t first_sample, int32_t samples_to_do, int channel) { + uint8_t frame[0x80] = {0}; + off_t frame_offset; + int i,j, sp_pos, frames_in, samples_done = 0, sample_count = 0; size_t bytes_per_frame, samples_per_frame; int32_t hist1 = stream->adpcm_history1_32; int32_t hist2 = stream->adpcm_history2_32; - /* external interleave (fixed size), mono/stereo */ - bytes_per_frame = 0x80; - samples_per_frame = 28*8 / channelspacing; - frames_in = first_sample / samples_per_frame; - first_sample = first_sample % samples_per_frame; /* data layout (mono): * - CD-XA audio is divided into sectors ("audio blocks"), each with 18 size 0x80 frames @@ -72,12 +73,19 @@ void decode_xa(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, i * ... * subframe 7: header @ 0x0b or 0x0f, 28 nibbles (high) @ 0x13,17,1b,1f,23 ... 
7f */ - frame_offset = stream->offset + bytes_per_frame*frames_in; - if (read_32bitBE(frame_offset+0x00,stream->streamfile) != read_32bitBE(frame_offset+0x04,stream->streamfile) || - read_32bitBE(frame_offset+0x08,stream->streamfile) != read_32bitBE(frame_offset+0x0c,stream->streamfile)) { - VGM_LOG("bad frames at %x\n", (uint32_t)frame_offset); - } + /* external interleave (fixed size), mono/stereo */ + bytes_per_frame = 0x80; + samples_per_frame = 28*8 / channelspacing; + frames_in = first_sample / samples_per_frame; + first_sample = first_sample % samples_per_frame; + + /* parse frame header */ + frame_offset = stream->offset + bytes_per_frame * frames_in; + read_streamfile(frame, frame_offset, bytes_per_frame, stream->streamfile); /* ignore EOF errors */ + + VGM_ASSERT(get_32bitBE(frame+0x0) != get_32bitBE(frame+0x4) || get_32bitBE(frame+0x8) != get_32bitBE(frame+0xC), + "bad frames at %x\n", (uint32_t)frame_offset); /* decode subframes */ @@ -86,18 +94,18 @@ void decode_xa(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, i uint8_t coef_index, shift_factor; /* parse current subframe (sound unit)'s header (sound parameters) */ - sp_offset = frame_offset + 0x04 + i*channelspacing + channel; - coef_index = ((uint8_t)read_8bit(sp_offset,stream->streamfile) >> 4) & 0xf; - shift_factor = ((uint8_t)read_8bit(sp_offset,stream->streamfile) >> 0) & 0xf; + sp_pos = 0x04 + i*channelspacing + channel; + coef_index = (frame[sp_pos] >> 4) & 0xf; + shift_factor = (frame[sp_pos] >> 0) & 0xf; - VGM_ASSERT(coef_index > 4 || shift_factor > 12, "XA: incorrect coefs/shift at %x\n", (uint32_t)sp_offset); + VGM_ASSERT(coef_index > 4 || shift_factor > 12, "XA: incorrect coefs/shift at %x\n", (uint32_t)frame_offset + sp_pos); if (coef_index > 4) coef_index = 0; /* only 4 filters are used, rest is apparently 0 */ if (shift_factor > 12) shift_factor = 9; /* supposedly, from Nocash PSX docs */ - coef1 = get_IK0(coef_index); - coef2 = get_IK1(coef_index); + coef1 = IK0[coef_index]; + coef2 = IK1[coef_index]; /* decode subframe nibbles */ @@ -105,9 +113,9 @@ void decode_xa(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, i uint8_t nibbles; int32_t new_sample; - off_t su_offset = (channelspacing==1) ? - frame_offset + 0x10 + j*0x04 + (i/2) : /* mono */ - frame_offset + 0x10 + j*0x04 + i; /* stereo */ + int su_pos = (channelspacing==1) ? + 0x10 + j*0x04 + (i/2) : /* mono */ + 0x10 + j*0x04 + i; /* stereo */ int get_high_nibble = (channelspacing==1) ? (i&1) : /* mono (even subframes = low, off subframes = high) */ (channel == 1); /* stereo (L channel / even subframes = low, R channel / odd subframes = high) */ @@ -118,11 +126,11 @@ void decode_xa(VGMSTREAMCHANNEL * stream, sample * outbuf, int channelspacing, i continue; } - nibbles = (uint8_t)read_8bit(su_offset,stream->streamfile); + nibbles = frame[su_pos]; new_sample = get_high_nibble ? (nibbles >> 4) & 0x0f : - (nibbles ) & 0x0f; + (nibbles >> 0) & 0x0f; new_sample = (int16_t)((new_sample << 12) & 0xf000) >> shift_factor; /* 16b sign extend + scale */ new_sample = new_sample << 4; diff --git a/Frameworks/vgmstream/vgmstream/src/formats.c b/Frameworks/vgmstream/vgmstream/src/formats.c index 4f55ac7e2..dbfc42833 100644 --- a/Frameworks/vgmstream/vgmstream/src/formats.c +++ b/Frameworks/vgmstream/vgmstream/src/formats.c @@ -1,4 +1,5 @@ #include "vgmstream.h" +#include "coding/coding.h" /* Defines the list of accepted extensions. 
vgmstream doesn't use it internally so it's here @@ -282,6 +283,7 @@ static const char* extension_list[] = { "mihb", "mnstr", "mogg", + //"mp+", //common [Moonshine Runners (PC)] //"mp2", //common //"mp3", //common //"mp4", //common @@ -584,6 +586,7 @@ static const char* common_extension_list[] = { "bin", //common "flac", //common "gsf", //conflicts with GBA gsf plugins? + "mp+", //common [Moonshine Runners (PC)] "mp2", //common "mp3", //common "mp4", //common @@ -942,6 +945,7 @@ static const meta_info meta_info_list[] = { {meta_XMU, "Outrage XMU header"}, {meta_XVAS, "Konami .XVAS header"}, {meta_PS2_XA2, "Acclaim XA2 Header"}, + {meta_SAP, "VING .SAP header"}, {meta_DC_IDVI, "Capcom IDVI header"}, {meta_KRAW, "Geometry Wars: Galaxies KRAW header"}, {meta_NGC_YMF, "YMF DSP Header"}, @@ -1259,22 +1263,10 @@ void get_vgmstream_coding_description(VGMSTREAM *vgmstream, char *out, size_t ou switch (vgmstream->coding_type) { #ifdef VGM_USE_FFMPEG case coding_FFmpeg: - { - ffmpeg_codec_data *data = vgmstream->codec_data; - - if (data) { - if (data->codec && data->codec->long_name) { - description = data->codec->long_name; - } else if (data->codec && data->codec->name) { - description = data->codec->name; - } else { - description = "FFmpeg (unknown codec)"; - } - } else { + description = ffmpeg_get_codec_name(vgmstream->codec_data); + if (description == NULL) description = "FFmpeg"; - } break; - } #endif default: list_length = sizeof(coding_info_list) / sizeof(coding_info); diff --git a/Frameworks/vgmstream/vgmstream/src/meta/acb.c b/Frameworks/vgmstream/vgmstream/src/meta/acb.c index 6207099fc..f25ed1d0c 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/acb.c +++ b/Frameworks/vgmstream/vgmstream/src/meta/acb.c @@ -70,19 +70,51 @@ fail: /* ************************************** */ +#define ACB_TABLE_BUFFER_SIZE 0x4000 + +STREAMFILE* setup_acb_streamfile(STREAMFILE *streamFile, size_t buffer_size) { + STREAMFILE *temp_streamFile = NULL, *new_streamFile = NULL; + + new_streamFile = open_wrap_streamfile(streamFile); + if (!new_streamFile) goto fail; + temp_streamFile = new_streamFile; + + new_streamFile = open_buffer_streamfile(temp_streamFile, buffer_size); + if (!new_streamFile) goto fail; + temp_streamFile = new_streamFile; + + return temp_streamFile; + +fail: + close_streamfile(temp_streamFile); + return NULL; +} + + typedef struct { + STREAMFILE *acbFile; /* original reference, don't close */ + /* keep track of these tables so they can be closed when done */ utf_context *Header; + utf_context *CueNameTable; utf_context *CueTable; utf_context *BlockTable; utf_context *SequenceTable; utf_context *TrackTable; - utf_context *TrackEventTable; - utf_context *CommandTable; + utf_context *TrackCommandTable; utf_context *SynthTable; utf_context *WaveformTable; + STREAMFILE *CueNameSf; + STREAMFILE *CueSf; + STREAMFILE *BlockSf; + STREAMFILE *SequenceSf; + STREAMFILE *TrackSf; + STREAMFILE *TrackCommandSf; + STREAMFILE *SynthSf; + STREAMFILE *WaveformSf; + /* config */ int is_memory; int target_waveid; @@ -102,16 +134,21 @@ typedef struct { } acb_header; -static int load_utf_subtable(STREAMFILE *acbFile, acb_header* acb, utf_context* *Table, const char* TableName, int* rows) { +static int open_utf_subtable(acb_header* acb, STREAMFILE* *TableSf, utf_context* *Table, const char* TableName, int* rows) { uint32_t offset = 0; /* already loaded */ if (*Table != NULL) return 1; - if (!utf_query_data(acbFile, acb->Header, 0, TableName, &offset, NULL)) + if (!utf_query_data(acb->acbFile, acb->Header, 0, 
TableName, &offset, NULL)) goto fail; - *Table = utf_open(acbFile, offset, rows, NULL); + + /* open a buffered streamfile to avoid so much IO back and forth between all the tables */ + *TableSf = setup_acb_streamfile(acb->acbFile, ACB_TABLE_BUFFER_SIZE); + if (!*TableSf) goto fail; + + *Table = utf_open(*TableSf, offset, rows, NULL); if (!*Table) goto fail; //;VGM_LOG("ACB: loaded table %s\n", TableName); @@ -121,7 +158,7 @@ fail: } -static void add_acb_name(STREAMFILE *acbFile, acb_header* acb, int8_t Waveform_Streaming) { +static void add_acb_name(acb_header* acb, int8_t Waveform_Streaming) { //todo safe string ops /* ignore name repeats */ @@ -154,23 +191,23 @@ static void add_acb_name(STREAMFILE *acbFile, acb_header* acb, int8_t Waveform_S } -static int load_acb_waveform(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { +static int load_acb_waveform(acb_header* acb, int16_t Index) { int16_t Waveform_Id; int8_t Waveform_Streaming; /* read Waveform[Index] */ - if (!load_utf_subtable(acbFile, acb, &acb->WaveformTable, "WaveformTable", NULL)) + if (!open_utf_subtable(acb, &acb->WaveformSf, &acb->WaveformTable, "WaveformTable", NULL)) goto fail; - if (!utf_query_s16(acbFile, acb->WaveformTable, Index, "Id", &Waveform_Id)) { /* older versions use Id */ + if (!utf_query_s16(acb->WaveformSf, acb->WaveformTable, Index, "Id", &Waveform_Id)) { /* older versions use Id */ if (acb->is_memory) { - if (!utf_query_s16(acbFile, acb->WaveformTable, Index, "MemoryAwbId", &Waveform_Id)) + if (!utf_query_s16(acb->WaveformSf, acb->WaveformTable, Index, "MemoryAwbId", &Waveform_Id)) goto fail; } else { - if (!utf_query_s16(acbFile, acb->WaveformTable, Index, "StreamAwbId", &Waveform_Id)) + if (!utf_query_s16(acb->WaveformSf, acb->WaveformTable, Index, "StreamAwbId", &Waveform_Id)) goto fail; } } - if (!utf_query_s8(acbFile, acb->WaveformTable, Index, "Streaming", &Waveform_Streaming)) + if (!utf_query_s8(acb->WaveformSf, acb->WaveformTable, Index, "Streaming", &Waveform_Streaming)) goto fail; //;VGM_LOG("ACB: Waveform[%i]: Id=%i, Streaming=%i\n", Index, Waveform_Id, Waveform_Streaming); @@ -182,7 +219,7 @@ static int load_acb_waveform(STREAMFILE *acbFile, acb_header* acb, int16_t Index return 1; /* aaand finally get name (phew) */ - add_acb_name(acbFile, acb, Waveform_Streaming); + add_acb_name(acb, Waveform_Streaming); return 1; fail: @@ -190,9 +227,9 @@ fail: } /* define here for Synths pointing to Sequences */ -static int load_acb_sequence(STREAMFILE *acbFile, acb_header* acb, int16_t Index); +static int load_acb_sequence(acb_header* acb, int16_t Index); -static int load_acb_synth(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { +static int load_acb_synth(acb_header* acb, int16_t Index) { int i, count; int8_t Synth_Type; uint32_t Synth_ReferenceItems_offset; @@ -200,11 +237,11 @@ static int load_acb_synth(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { /* read Synth[Index] */ - if (!load_utf_subtable(acbFile, acb, &acb->SynthTable, "SynthTable", NULL)) + if (!open_utf_subtable(acb, &acb->SynthSf, &acb->SynthTable, "SynthTable", NULL)) goto fail; - if (!utf_query_s8(acbFile, acb->SynthTable, Index, "Type", &Synth_Type)) + if (!utf_query_s8(acb->SynthSf, acb->SynthTable, Index, "Type", &Synth_Type)) goto fail; - if (!utf_query_data(acbFile, acb->SynthTable, Index, "ReferenceItems", &Synth_ReferenceItems_offset, &Synth_ReferenceItems_size)) + if (!utf_query_data(acb->SynthSf, acb->SynthTable, Index, "ReferenceItems", &Synth_ReferenceItems_offset, &Synth_ReferenceItems_size)) goto fail; 
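/* Illustrative aside, not part of the patch: what open_utf_subtable() above boils down
 * to for a single table, unrolled. Each @UTF subtable now gets its own wrapped+buffered
 * STREAMFILE (ACB_TABLE_BUFFER_SIZE = 0x4000) so row queries against different tables
 * stop invalidating one shared read buffer; the trade-off is that every utf_context must
 * be closed together with its paired STREAMFILE, as the fail path of load_acb_wave_name()
 * does. Function and field names are the ones introduced by the patch. */
static int open_synth_table_sketch(acb_header* acb) {
    uint32_t offset = 0;

    /* the header row stores the offset of each subtable inside the .acb */
    if (!utf_query_data(acb->acbFile, acb->Header, 0, "SynthTable", &offset, NULL))
        return 0;

    /* dedicated buffered reader over the same base file */
    acb->SynthSf = setup_acb_streamfile(acb->acbFile, ACB_TABLE_BUFFER_SIZE);
    if (!acb->SynthSf)
        return 0;

    acb->SynthTable = utf_open(acb->SynthSf, offset, NULL, NULL);
    return (acb->SynthTable != NULL);
}
/* cleanup pairs with it: utf_close(acb->SynthTable); close_streamfile(acb->SynthSf); */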
//;VGM_LOG("ACB: Synth[%i]: Type=%x, ReferenceItems={%x,%x}\n", Index, Synth_Type, Synth_ReferenceItems_offset, Synth_ReferenceItems_size); @@ -232,8 +269,8 @@ static int load_acb_synth(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { count = Synth_ReferenceItems_size / 0x04; for (i = 0; i < count; i++) { - uint16_t Synth_ReferenceItem_type = read_u16be(Synth_ReferenceItems_offset + i*0x04 + 0x00, acbFile); - uint16_t Synth_ReferenceItem_index = read_u16be(Synth_ReferenceItems_offset + i*0x04 + 0x02, acbFile); + uint16_t Synth_ReferenceItem_type = read_u16be(Synth_ReferenceItems_offset + i*0x04 + 0x00, acb->SynthSf); + uint16_t Synth_ReferenceItem_index = read_u16be(Synth_ReferenceItems_offset + i*0x04 + 0x02, acb->SynthSf); //;VGM_LOG("ACB: Synth.ReferenceItem: type=%x, index=%x\n", Synth_ReferenceItem_type, Synth_ReferenceItem_index); switch(Synth_ReferenceItem_type) { @@ -242,17 +279,17 @@ static int load_acb_synth(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { break; case 0x01: /* Waveform (most common) */ - if (!load_acb_waveform(acbFile, acb, Synth_ReferenceItem_index)) + if (!load_acb_waveform(acb, Synth_ReferenceItem_index)) goto fail; break; case 0x02: /* Synth, possibly random (rare, found in Sonic Lost World with ReferenceType 2) */ - if (!load_acb_synth(acbFile, acb, Synth_ReferenceItem_index)) + if (!load_acb_synth(acb, Synth_ReferenceItem_index)) goto fail; break; case 0x03: /* Sequence of Synths w/ % in Synth.TrackValues (rare, found in Sonic Lost World with ReferenceType 2) */ - if (!load_acb_sequence(acbFile, acb, Synth_ReferenceItem_index)) + if (!load_acb_sequence(acb, Synth_ReferenceItem_index)) goto fail; break; @@ -271,33 +308,33 @@ fail: return 0; } -static int load_acb_track_event_command(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { +static int load_acb_track_event_command(acb_header* acb, int16_t Index) { int16_t Track_EventIndex; uint32_t Track_Command_offset; uint32_t Track_Command_size; /* read Track[Index] */ - if (!load_utf_subtable(acbFile, acb, &acb->TrackTable, "TrackTable", NULL)) + if (!open_utf_subtable(acb, &acb->TrackSf, &acb->TrackTable, "TrackTable", NULL)) goto fail; - if (!utf_query_s16(acbFile, acb->TrackTable, Index, "EventIndex", &Track_EventIndex)) + if (!utf_query_s16(acb->TrackSf, acb->TrackTable, Index, "EventIndex", &Track_EventIndex)) goto fail; //;VGM_LOG("ACB: Track[%i]: EventIndex=%i\n", Index, Track_EventIndex); /* next link varies with version, check by table existence */ if (acb->has_CommandTable) { /* <=v1.27 */ /* read Command[EventIndex] */ - if (!load_utf_subtable(acbFile, acb, &acb->CommandTable, "CommandTable", NULL)) + if (!open_utf_subtable(acb, &acb->TrackCommandSf, &acb->TrackCommandTable, "CommandTable", NULL)) goto fail; - if (!utf_query_data(acbFile, acb->CommandTable, Track_EventIndex, "Command", &Track_Command_offset, &Track_Command_size)) + if (!utf_query_data(acb->TrackCommandSf, acb->TrackCommandTable, Track_EventIndex, "Command", &Track_Command_offset, &Track_Command_size)) goto fail; //;VGM_LOG("ACB: Command[%i]: Command={%x,%x}\n", Track_EventIndex, Track_Command_offset,Track_Command_size); } else if (acb->has_TrackEventTable) { /* >=v1.28 */ /* read TrackEvent[EventIndex] */ - if (!load_utf_subtable(acbFile, acb, &acb->TrackEventTable, "TrackEventTable", NULL)) + if (!open_utf_subtable(acb, &acb->TrackCommandSf, &acb->TrackCommandTable, "TrackEventTable", NULL)) goto fail; - if (!utf_query_data(acbFile, acb->TrackEventTable, Track_EventIndex, "Command", &Track_Command_offset, 
&Track_Command_size)) + if (!utf_query_data(acb->TrackCommandSf, acb->TrackCommandTable, Track_EventIndex, "Command", &Track_Command_offset, &Track_Command_size)) goto fail; //;VGM_LOG("ACB: TrackEvent[%i]: Command={%x,%x}\n", Track_EventIndex, Track_Command_offset,Track_Command_size); } @@ -315,8 +352,8 @@ static int load_acb_track_event_command(STREAMFILE *acbFile, acb_header* acb, in while (offset < max_offset) { - tlv_code = read_u16be(offset + 0x00, acbFile); - tlv_size = read_u8 (offset + 0x02, acbFile); + tlv_code = read_u16be(offset + 0x00, acb->TrackCommandSf); + tlv_size = read_u8 (offset + 0x02, acb->TrackCommandSf); offset += 0x03; if (tlv_code == 0x07D0) { @@ -325,20 +362,20 @@ static int load_acb_track_event_command(STREAMFILE *acbFile, acb_header* acb, in break; } - tlv_type = read_u16be(offset + 0x00, acbFile); - tlv_index = read_u16be(offset + 0x02, acbFile); + tlv_type = read_u16be(offset + 0x00, acb->TrackCommandSf); + tlv_index = read_u16be(offset + 0x02, acb->TrackCommandSf); //;VGM_LOG("ACB: TLV at %x: type %x, index=%x\n", offset, tlv_type, tlv_index); /* probably same as Synth_ReferenceItem_type */ switch(tlv_type) { case 0x02: /* Synth (common) */ - if (!load_acb_synth(acbFile, acb, tlv_index)) + if (!load_acb_synth(acb, tlv_index)) goto fail; break; case 0x03: /* Sequence of Synths (common, ex. Yakuza 6, Yakuza Kiwami 2) */ - if (!load_acb_sequence(acbFile, acb, tlv_index)) + if (!load_acb_sequence(acb, tlv_index)) goto fail; break; @@ -360,7 +397,7 @@ fail: return 0; } -static int load_acb_sequence(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { +static int load_acb_sequence(acb_header* acb, int16_t Index) { int i; int16_t Sequence_NumTracks; uint32_t Sequence_TrackIndex_offset; @@ -368,11 +405,11 @@ static int load_acb_sequence(STREAMFILE *acbFile, acb_header* acb, int16_t Index /* read Sequence[Index] */ - if (!load_utf_subtable(acbFile, acb, &acb->SequenceTable, "SequenceTable", NULL)) + if (!open_utf_subtable(acb, &acb->SequenceSf, &acb->SequenceTable, "SequenceTable", NULL)) goto fail; - if (!utf_query_s16(acbFile, acb->SequenceTable, Index, "NumTracks", &Sequence_NumTracks)) + if (!utf_query_s16(acb->SequenceSf, acb->SequenceTable, Index, "NumTracks", &Sequence_NumTracks)) goto fail; - if (!utf_query_data(acbFile, acb->SequenceTable, Index, "TrackIndex", &Sequence_TrackIndex_offset, &Sequence_TrackIndex_size)) + if (!utf_query_data(acb->SequenceSf, acb->SequenceTable, Index, "TrackIndex", &Sequence_TrackIndex_offset, &Sequence_TrackIndex_size)) goto fail; //;VGM_LOG("ACB: Sequence[%i]: NumTracks=%i, TrackIndex={%x, %x}\n", Index, Sequence_NumTracks, Sequence_TrackIndex_offset,Sequence_TrackIndex_size); @@ -390,9 +427,9 @@ static int load_acb_sequence(STREAMFILE *acbFile, acb_header* acb, int16_t Index /* read Tracks inside Sequence */ for (i = 0; i < Sequence_NumTracks; i++) { - int16_t Sequence_TrackIndex_index = read_s16be(Sequence_TrackIndex_offset + i*0x02, acbFile); + int16_t Sequence_TrackIndex_index = read_s16be(Sequence_TrackIndex_offset + i*0x02, acb->SequenceSf); - if (!load_acb_track_event_command(acbFile, acb, Sequence_TrackIndex_index)) + if (!load_acb_track_event_command(acb, Sequence_TrackIndex_index)) goto fail; } @@ -403,7 +440,7 @@ fail: return 0; } -static int load_acb_block(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { +static int load_acb_block(acb_header* acb, int16_t Index) { int i; int16_t Block_NumTracks; uint32_t Block_TrackIndex_offset; @@ -411,11 +448,11 @@ static int load_acb_block(STREAMFILE *acbFile, 
acb_header* acb, int16_t Index) { /* read Block[Index] */ - if (!load_utf_subtable(acbFile, acb, &acb->BlockTable, "BlockTable", NULL)) + if (!open_utf_subtable(acb, &acb->BlockSf, &acb->BlockTable, "BlockTable", NULL)) goto fail; - if (!utf_query_s16(acbFile, acb->BlockTable, Index, "NumTracks", &Block_NumTracks)) + if (!utf_query_s16(acb->BlockSf, acb->BlockTable, Index, "NumTracks", &Block_NumTracks)) goto fail; - if (!utf_query_data(acbFile, acb->BlockTable, Index, "TrackIndex", &Block_TrackIndex_offset, &Block_TrackIndex_size)) + if (!utf_query_data(acb->BlockSf, acb->BlockTable, Index, "TrackIndex", &Block_TrackIndex_offset, &Block_TrackIndex_size)) goto fail; //;VGM_LOG("ACB: Block[%i]: NumTracks=%i, TrackIndex={%x, %x}\n", Index, Block_NumTracks, Block_TrackIndex_offset,Block_TrackIndex_size); @@ -426,9 +463,9 @@ static int load_acb_block(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { /* read Tracks inside Block */ for (i = 0; i < Block_NumTracks; i++) { - int16_t Block_TrackIndex_index = read_s16be(Block_TrackIndex_offset + i*0x02, acbFile); + int16_t Block_TrackIndex_index = read_s16be(Block_TrackIndex_offset + i*0x02, acb->BlockSf); - if (!load_acb_track_event_command(acbFile, acb, Block_TrackIndex_index)) + if (!load_acb_track_event_command(acb, Block_TrackIndex_index)) goto fail; } @@ -438,17 +475,17 @@ fail: } -static int load_acb_cue(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { +static int load_acb_cue(acb_header* acb, int16_t Index) { int8_t Cue_ReferenceType; int16_t Cue_ReferenceIndex; /* read Cue[Index] */ - if (!load_utf_subtable(acbFile, acb, &acb->CueTable, "CueTable", NULL)) + if (!open_utf_subtable(acb, &acb->CueSf, &acb->CueTable, "CueTable", NULL)) goto fail; - if (!utf_query_s8 (acbFile, acb->CueTable, Index, "ReferenceType", &Cue_ReferenceType)) + if (!utf_query_s8 (acb->CueSf, acb->CueTable, Index, "ReferenceType", &Cue_ReferenceType)) goto fail; - if (!utf_query_s16(acbFile, acb->CueTable, Index, "ReferenceIndex", &Cue_ReferenceIndex)) + if (!utf_query_s16(acb->CueSf, acb->CueTable, Index, "ReferenceIndex", &Cue_ReferenceIndex)) goto fail; //;VGM_LOG("ACB: Cue[%i]: ReferenceType=%i, ReferenceIndex=%i\n", Index, Cue_ReferenceType, Cue_ReferenceIndex); @@ -457,22 +494,22 @@ static int load_acb_cue(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { switch(Cue_ReferenceType) { case 1: /* Cue > Waveform (ex. PES 2015) */ - if (!load_acb_waveform(acbFile, acb, Cue_ReferenceIndex)) + if (!load_acb_waveform(acb, Cue_ReferenceIndex)) goto fail; break; case 2: /* Cue > Synth > Waveform (ex. Ukiyo no Roushi) */ - if (!load_acb_synth(acbFile, acb, Cue_ReferenceIndex)) + if (!load_acb_synth(acb, Cue_ReferenceIndex)) goto fail; break; case 3: /* Cue > Sequence > Track > Command > Synth > Waveform (ex. Valkyrie Profile anatomia, Yakuza Kiwami 2) */ - if (!load_acb_sequence(acbFile, acb, Cue_ReferenceIndex)) + if (!load_acb_sequence(acb, Cue_ReferenceIndex)) goto fail; break; case 8: /* Cue > Block > Track > Command > Synth > Waveform (ex. 
Sonic Lost World, rare) */ - if (!load_acb_block(acbFile, acb, Cue_ReferenceIndex)) + if (!load_acb_block(acb, Cue_ReferenceIndex)) goto fail; break; @@ -488,17 +525,17 @@ fail: } -static int load_acb_cuename(STREAMFILE *acbFile, acb_header* acb, int16_t Index) { +static int load_acb_cuename(acb_header* acb, int16_t Index) { int16_t CueName_CueIndex; const char* CueName_CueName; /* read CueName[Index] */ - if (!load_utf_subtable(acbFile, acb, &acb->CueNameTable, "CueNameTable", NULL)) + if (!open_utf_subtable(acb, &acb->CueNameSf, &acb->CueNameTable, "CueNameTable", NULL)) goto fail; - if (!utf_query_s16(acbFile, acb->CueNameTable, Index, "CueIndex", &CueName_CueIndex)) + if (!utf_query_s16(acb->CueNameSf, acb->CueNameTable, Index, "CueIndex", &CueName_CueIndex)) goto fail; - if (!utf_query_string(acbFile, acb->CueNameTable, Index, "CueName", &CueName_CueName)) + if (!utf_query_string(acb->CueNameSf, acb->CueNameTable, Index, "CueName", &CueName_CueName)) goto fail; //;VGM_LOG("ACB: CueName[%i]: CueIndex=%i, CueName=%s\n", Index, CueName_CueIndex, CueName_CueName); @@ -507,7 +544,7 @@ static int load_acb_cuename(STREAMFILE *acbFile, acb_header* acb, int16_t Index) acb->cuename_index = Index; acb->cuename_name = CueName_CueName; - if (!load_acb_cue(acbFile, acb, CueName_CueIndex)) + if (!load_acb_cue(acb, CueName_CueIndex)) goto fail; return 1; @@ -516,12 +553,12 @@ fail: } -void load_acb_wave_name(STREAMFILE *acbFile, VGMSTREAM* vgmstream, int waveid, int is_memory) { +void load_acb_wave_name(STREAMFILE *streamFile, VGMSTREAM* vgmstream, int waveid, int is_memory) { acb_header acb = {0}; int i, CueName_rows; - if (!acbFile || !vgmstream || waveid < 0) + if (!streamFile || !vgmstream || waveid < 0) return; /* Normally games load a .acb + .awb, and asks the .acb to play a cue by name or index. 
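/* Illustrative aside, not part of the patch: how a metadata parser would be expected to
 * use load_acb_wave_name() once it has picked an .awb wave. The caller below is
 * hypothetical; open_streamfile_by_ext() and close_streamfile() are existing vgmstream
 * helpers, and is_memory=0 means the wave lives in the streamed .awb rather than inside
 * the .acb itself. */
static void apply_awb_cue_name_sketch(STREAMFILE* sf_awb, VGMSTREAM* vgmstream, int waveid) {
    STREAMFILE* sf_acb = open_streamfile_by_ext(sf_awb, "acb"); /* companion header */
    if (sf_acb == NULL)
        return; /* names are optional, playback still works without the .acb */

    load_acb_wave_name(sf_acb, vgmstream, waveid, 0);
    close_streamfile(sf_acb);
}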
@@ -548,21 +585,23 @@ void load_acb_wave_name(STREAMFILE *acbFile, VGMSTREAM* vgmstream, int waveid, i //;VGM_LOG("ACB: find waveid=%i\n", waveid); - acb.Header = utf_open(acbFile, 0x00, NULL, NULL); + acb.acbFile = streamFile; + + acb.Header = utf_open(acb.acbFile, 0x00, NULL, NULL); if (!acb.Header) goto fail; acb.target_waveid = waveid; acb.is_memory = is_memory; - acb.has_TrackEventTable = utf_query_data(acbFile, acb.Header, 0, "TrackEventTable", NULL,NULL); - acb.has_CommandTable = utf_query_data(acbFile, acb.Header, 0, "CommandTable", NULL,NULL); + acb.has_TrackEventTable = utf_query_data(acb.acbFile, acb.Header, 0, "TrackEventTable", NULL,NULL); + acb.has_CommandTable = utf_query_data(acb.acbFile, acb.Header, 0, "CommandTable", NULL,NULL); /* read all possible cue names and find which waveids are referenced by it */ - if (!load_utf_subtable(acbFile, &acb, &acb.CueNameTable, "CueNameTable", &CueName_rows)) + if (!open_utf_subtable(&acb, &acb.CueNameSf, &acb.CueNameTable, "CueNameTable", &CueName_rows)) goto fail; for (i = 0; i < CueName_rows; i++) { - if (!load_acb_cuename(acbFile, &acb, i)) + if (!load_acb_cuename(&acb, i)) goto fail; } @@ -574,12 +613,20 @@ void load_acb_wave_name(STREAMFILE *acbFile, VGMSTREAM* vgmstream, int waveid, i fail: utf_close(acb.Header); + utf_close(acb.CueNameTable); utf_close(acb.CueTable); utf_close(acb.SequenceTable); utf_close(acb.TrackTable); - utf_close(acb.TrackEventTable); - utf_close(acb.CommandTable); + utf_close(acb.TrackCommandTable); utf_close(acb.SynthTable); utf_close(acb.WaveformTable); + + close_streamfile(acb.CueNameSf); + close_streamfile(acb.CueSf); + close_streamfile(acb.SequenceSf); + close_streamfile(acb.TrackSf); + close_streamfile(acb.TrackCommandSf); + close_streamfile(acb.SynthSf); + close_streamfile(acb.WaveformSf); } diff --git a/Frameworks/vgmstream/vgmstream/src/meta/ffmpeg.c b/Frameworks/vgmstream/vgmstream/src/meta/ffmpeg.c index 3784b7dd4..f8363b0bd 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/ffmpeg.c +++ b/Frameworks/vgmstream/vgmstream/src/meta/ffmpeg.c @@ -61,6 +61,12 @@ VGMSTREAM * init_vgmstream_ffmpeg_offset(STREAMFILE *streamFile, uint64_t start, num_samples = mpeg_get_samples(streamFile, 0x00, get_streamfile_size(streamFile)); } + /* hack for MPC, that seeks/resets incorrectly due to seek table shenanigans */ + if (read_32bitBE(0x00, streamFile) == 0x4D502B07 || /* "MP+\7" (Musepack V7) */ + read_32bitBE(0x00, streamFile) == 0x4D50434B) { /* "MPCK" (Musepack V8) */ + ffmpeg_set_force_seek(data); + } + /* default but often inaccurate when calculated using bitrate (wrong for VBR) */ if (!num_samples) { num_samples = data->totalSamples; diff --git a/Frameworks/vgmstream/vgmstream/src/meta/hca_keys.h b/Frameworks/vgmstream/vgmstream/src/meta/hca_keys.h index 1ae76b85c..ba97274f4 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/hca_keys.h +++ b/Frameworks/vgmstream/vgmstream/src/meta/hca_keys.h @@ -294,6 +294,12 @@ static const hcakey_info hcakey_list[] = { /* Uta Macross SmaPho De Culture (Android) */ {396798934275978741}, // 0581B68744C5F5F5 + /* Touhou Cannonball (Android) */ + {5465717035832233}, // 00136B0A6A5D13A9 + + /* Love Live! 
School idol festival ALL STARS (Android) */ + {6498535309877346413}, // 5A2F6F6F0192806D + /* Dragalia Lost (Cygames) [iOS/Android] */ {2967411924141, subkeys_dgl, sizeof(subkeys_dgl) / sizeof(subkeys_dgl[0]) }, // 000002B2E7889CAD diff --git a/Frameworks/vgmstream/vgmstream/src/meta/sat_sap.c b/Frameworks/vgmstream/vgmstream/src/meta/sat_sap.c index 9e03cc638..ed4a2e618 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/sat_sap.c +++ b/Frameworks/vgmstream/vgmstream/src/meta/sat_sap.c @@ -1,67 +1,46 @@ #include "meta.h" #include "../util.h" -/* SAP (from Bubble_Symphony) */ +/* SAP - from Bubble Symphony (SAT) */ VGMSTREAM * init_vgmstream_sat_sap(STREAMFILE *streamFile) { VGMSTREAM * vgmstream = NULL; - char filename[PATH_LIMIT]; off_t start_offset; + int num_samples; + int loop_flag = 0, channel_count; - int loop_flag = 0; - int channel_count; - - /* check extension, case insensitive */ - streamFile->get_name(streamFile,filename,sizeof(filename)); - if (strcasecmp("sap",filename_extension(filename))) goto fail; - - - /* check header */ - if (read_32bitBE(0x0A,streamFile) != 0x0010400E) /* "0010400E" */ + /* checks */ + if (!check_extensions(streamFile, "sap")) goto fail; - - loop_flag = 0; /* (read_32bitLE(0x08,streamFile)!=0); */ + num_samples = read_32bitBE(0x00,streamFile); /* first for I/O reasons */ channel_count = read_32bitBE(0x04,streamFile); - - /* build the VGMSTREAM */ + if (channel_count != 1) goto fail; /* unknown layout */ + + if (read_32bitBE(0x08,streamFile) != 0x10) /* bps? */ + goto fail; + if (read_16bitBE(0x0c,streamFile) != 0x400E) /* ? */ + goto fail; + + loop_flag = 0; + start_offset = 0x800; + + + /* build the VGMSTREAM */ vgmstream = allocate_vgmstream(channel_count,loop_flag); if (!vgmstream) goto fail; - /* fill in the vital statistics */ - start_offset = 0x800; - vgmstream->channels = channel_count; + vgmstream->meta_type = meta_SAP; vgmstream->sample_rate = (uint16_t)read_16bitBE(0x0E,streamFile); + vgmstream->num_samples = num_samples; + vgmstream->coding_type = coding_PCM16BE; - vgmstream->num_samples = read_32bitBE(0x00,streamFile); - if (loop_flag) { - vgmstream->loop_start_sample = 0; /* (read_32bitLE(0x08,streamFile)-1)*28; */ - vgmstream->loop_end_sample = read_32bitBE(0x00,streamFile); - } - vgmstream->layout_type = layout_none; - vgmstream->interleave_block_size = 0x10; - vgmstream->meta_type = meta_SAT_SAP; - - /* open the file for reading */ - { - int i; - STREAMFILE * file; - file = streamFile->open(streamFile,filename,STREAMFILE_DEFAULT_BUFFER_SIZE); - if (!file) goto fail; - for (i=0;ich[i].streamfile = file; - - vgmstream->ch[i].channel_start_offset= - vgmstream->ch[i].offset=start_offset+ - vgmstream->interleave_block_size*i; - - } - } + if (!vgmstream_open_stream(vgmstream,streamFile,start_offset)) + goto fail; return vgmstream; - /* clean up anything we may have opened */ fail: - if (vgmstream) close_vgmstream(vgmstream); + close_vgmstream(vgmstream); return NULL; } diff --git a/Frameworks/vgmstream/vgmstream/src/meta/ta_aac.c b/Frameworks/vgmstream/vgmstream/src/meta/ta_aac.c index 8d7ae806d..59388f20d 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/ta_aac.c +++ b/Frameworks/vgmstream/vgmstream/src/meta/ta_aac.c @@ -139,7 +139,7 @@ VGMSTREAM * init_vgmstream_ta_aac_ps3(STREAMFILE *streamFile) { VGMSTREAM * vgmstream = NULL; off_t start_offset; int loop_flag, channel_count; - uint32_t data_size, loop_start, loop_end, codec_id; + uint32_t data_size, loop_start, loop_end, codec_id, asc_chunk; /* check extension, case insensitive */ /* 
.aac: expected, .laac/ace: for players to avoid hijacking MP4/AAC */ @@ -149,30 +149,31 @@ VGMSTREAM * init_vgmstream_ta_aac_ps3(STREAMFILE *streamFile) { if (read_32bitBE(0x00, streamFile) != 0x41414320) /* "AAC " */ goto fail; - /* Haven't Found a codec flag yet. Let's just use this for now */ - if (read_32bitBE(0x10000, streamFile) != 0x41534320) /* "ASC " */ + /* Find the ASC chunk, That's where the goodies are */ + asc_chunk = read_32bitBE(0x40, streamFile); + if (read_32bitBE(asc_chunk, streamFile) != 0x41534320) /* "ASC " */ goto fail; - if (read_32bitBE(0x10104, streamFile) != 0xFFFFFFFF) + if (read_32bitBE(asc_chunk+0x104, streamFile) != 0xFFFFFFFF) loop_flag = 1; else loop_flag = 0; - channel_count = read_32bitBE(0x100F4, streamFile); - codec_id = read_32bitBE(0x100F0, streamFile); + channel_count = read_32bitBE(asc_chunk + 0xF4, streamFile); + codec_id = read_32bitBE(asc_chunk + 0xF0, streamFile); /* build the VGMSTREAM */ vgmstream = allocate_vgmstream(channel_count, loop_flag); if (!vgmstream) goto fail; - /* Useless header, let's play the guessing game */ - start_offset = 0x10110; - vgmstream->sample_rate = read_32bitBE(0x100FC, streamFile); + /* ASC header */ + start_offset = asc_chunk + 0x110; + vgmstream->sample_rate = read_32bitBE(asc_chunk + 0xFC, streamFile); vgmstream->channels = channel_count; vgmstream->meta_type = meta_TA_AAC_PS3; - data_size = read_32bitBE(0x100F8, streamFile); - loop_start = read_32bitBE(0x10104, streamFile); - loop_end = read_32bitBE(0x10108, streamFile); + data_size = read_32bitBE(asc_chunk + 0xF8, streamFile); + loop_start = read_32bitBE(asc_chunk + 0x104, streamFile); + loop_end = read_32bitBE(asc_chunk + 0x108, streamFile); #ifdef VGM_USE_FFMPEG { diff --git a/Frameworks/vgmstream/vgmstream/src/meta/txth.c b/Frameworks/vgmstream/vgmstream/src/meta/txth.c index ed43222a7..6b0d0ae62 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/txth.c +++ b/Frameworks/vgmstream/vgmstream/src/meta/txth.c @@ -552,8 +552,15 @@ static VGMSTREAM *init_subfile(txth_header * txth) { STREAMFILE * streamSubfile = NULL; - if (txth->subfile_size == 0) - txth->subfile_size = txth->data_size - txth->subfile_offset; + if (txth->subfile_size == 0) { + if (txth->data_size_set) + txth->subfile_size = txth->data_size; + else + txth->subfile_size = txth->data_size - txth->subfile_offset; + if (txth->subfile_size + txth->subfile_offset > get_streamfile_size(txth->streamBody)) + txth->subfile_size = get_streamfile_size(txth->streamBody) - txth->subfile_offset; + } + if (txth->subfile_extension[0] == '\0') get_streamfile_ext(txth->streamFile,txth->subfile_extension,sizeof(txth->subfile_extension)); @@ -586,7 +593,8 @@ static VGMSTREAM *init_subfile(txth_header * txth) { vgmstream_force_loop(vgmstream, 0, 0, 0); } - if (txth->chunk_count && txth->subsong_count) { + /* assumes won't point to subfiles with subsongs */ + if (/*txth->chunk_count &&*/ txth->subsong_count) { vgmstream->num_streams = txth->subsong_count; } //todo: other combos with subsongs + subfile? 
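/* Illustrative aside, not part of the patch: the subfile_size defaulting/clamping added
 * above, isolated with a worked example. Names are ad-hoc; only the arithmetic mirrors
 * init_subfile(). */
static size_t default_subfile_size_sketch(size_t data_size, int data_size_set,
        size_t subfile_offset, size_t body_size) {
    size_t subfile_size = data_size_set ? data_size : data_size - subfile_offset;

    /* never let offset+size run past the end of the body file */
    if (subfile_size + subfile_offset > body_size)
        subfile_size = body_size - subfile_offset;
    return subfile_size;
}
/* ex. body_size=0x9000, subfile_offset=0x1000, data_size=0x9000 set in the .txth:
 * the raw value would overrun by 0x1000, so the clamp yields 0x8000 */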
@@ -1249,7 +1257,7 @@ static int is_substring(const char * val, const char * cmp, int inline_field) { chr = val[len]; /* "val" can end with math for inline fields (like interleave*0x10) */ - if (inline_field && (chr == '+' || chr == '-' || chr == '*' || chr == '/')) + if (inline_field && (chr == '+' || chr == '-' || chr == '*' || chr == '/' || chr == '&')) return len; /* otherwise "val" ends in space or eof (to tell "interleave" and "interleave_last" apart) */ @@ -1525,7 +1533,7 @@ static int parse_num(STREAMFILE * streamFile, txth_header * txth, const char * v brackets--; n = 1; } - else if (type == '+' || type == '-' || type == '/' || type == '*') { /* op */ + else if (type == '+' || type == '-' || type == '/' || type == '*' || type == '&') { /* op */ op = type; n = 1; } @@ -1593,6 +1601,8 @@ static int parse_num(STREAMFILE * streamFile, txth_header * txth, const char * v else if ((n = is_string_field(val,"loop_end_sample"))) value = txth->loop_end_sample; else if ((n = is_string_field(val,"subsong_count"))) value = txth->subsong_count; else if ((n = is_string_field(val,"subsong_offset"))) value = txth->subsong_offset; + else if ((n = is_string_field(val,"subfile_offset"))) value = txth->subfile_offset; + else if ((n = is_string_field(val,"subfile_size"))) value = txth->subfile_size; //todo whatever, improve else if ((n = is_string_field(val,"name_value"))) value = txth->name_values[0]; else if ((n = is_string_field(val,"name_value1"))) value = txth->name_values[0]; @@ -1624,6 +1634,7 @@ static int parse_num(STREAMFILE * streamFile, txth_header * txth, const char * v case '-': value = result - value; break; case '*': value = result * value; break; case '/': if (value == 0) goto fail; value = result / value; break; + case '&': value = result & value; break; default: break; } op = ' '; /* consume */ diff --git a/Frameworks/vgmstream/vgmstream/src/meta/ubi_hx.c b/Frameworks/vgmstream/vgmstream/src/meta/ubi_hx.c index 705adbcc0..48020a1c8 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/ubi_hx.c +++ b/Frameworks/vgmstream/vgmstream/src/meta/ubi_hx.c @@ -91,6 +91,45 @@ static void build_readable_name(char * buf, size_t buf_size, ubi_hx_header * hx) snprintf(buf,buf_size, "%s/%i/%08x-%08x/%s", "hx", hx->header_index, hx->cuuid1,hx->cuuid2, grp_name); } +#define TXT_LINE_MAX 0x1000 + +/* get name */ +static int parse_name_bnh(ubi_hx_header * hx, STREAMFILE *sf, uint32_t cuuid1, uint32_t cuuid2) { + STREAMFILE *sf_t; + off_t txt_offset = 0; + char line[TXT_LINE_MAX]; + char cuuid[40]; + + sf_t = open_streamfile_by_ext(sf,"bnh"); + if (sf_t == NULL) goto fail; + + snprintf(cuuid,sizeof(cuuid), "cuuid( 0x%08x, 0x%08x )", cuuid1, cuuid2); + + /* each .bnh line has a cuuid, a bunch of repeated fields and name (sometimes name is filename or "bad name") */ + while (txt_offset < get_streamfile_size(sf)) { + int line_read, bytes_read; + + bytes_read = get_streamfile_text_line(TXT_LINE_MAX,line, txt_offset,sf_t, &line_read); + if (!line_read) break; + txt_offset += bytes_read; + + if (strncmp(line,cuuid,31) != 0) + continue; + if (bytes_read <= 79) + goto fail; + + /* cuuid found, copy name (lines are fixed and always starts from the same position) */ + strcpy(hx->internal_name, &line[79]); + + close_streamfile(sf_t); + return 1; + } + +fail: + close_streamfile(sf_t); + return 0; +} + /* get referenced name from WavRes, using the index again (abridged) */ static int parse_name(ubi_hx_header * hx, STREAMFILE *sf) { @@ -107,6 +146,7 @@ static int parse_name(ubi_hx_header * hx, STREAMFILE *sf) { off_t 
header_offset; size_t class_size; int j, link_count, language_count, is_found = 0; + uint32_t cuuid1, cuuid2; class_size = read_32bit(offset + 0x00, sf); @@ -114,6 +154,9 @@ static int parse_name(ubi_hx_header * hx, STREAMFILE *sf) { read_string(class_name,class_size+1, offset + 0x04, sf); /* not null-terminated */ offset += 0x04 + class_size; + cuuid1 = (uint32_t)read_32bit(offset + 0x00, sf); + cuuid2 = (uint32_t)read_32bit(offset + 0x04, sf); + header_offset = read_32bit(offset + 0x08, sf); offset += 0x10; @@ -159,10 +202,18 @@ static int parse_name(ubi_hx_header * hx, STREAMFILE *sf) { resclass_size = read_32bit(wavres_offset, sf); wavres_offset += 0x04 + resclass_size + 0x08 + 0x04; /* skip class + cuiid + flags */ - internal_size = read_32bit(wavres_offset + 0x00, sf); /* usually 0 in consoles */ + internal_size = read_32bit(wavres_offset + 0x00, sf); if (internal_size > sizeof(hx->internal_name)+1) goto fail; - read_string(hx->internal_name,internal_size+1, wavres_offset + 0x04, sf); - return 1; + + /* usually 0 in consoles */ + if (internal_size != 0) { + read_string(hx->internal_name,internal_size+1, wavres_offset + 0x04, sf); + return 1; + } + else { + parse_name_bnh(hx, sf, cuuid1, cuuid2); + return 1; /* ignore error */ + } } } @@ -181,7 +232,7 @@ static int parse_header(ubi_hx_header * hx, STREAMFILE *sf, off_t offset, size_t //todo cleanup/unify common readings - //;VGM_LOG("UBI HX: header class %s, o=%lx, s=%x\n\n", class_name, header_offset, header_size); + //;VGM_LOG("UBI HX: header o=%lx, s=%x\n\n", offset, size); hx->header_index = index; hx->header_offset = offset; @@ -307,6 +358,8 @@ static int parse_header(ubi_hx_header * hx, STREAMFILE *sf, off_t offset, size_t hx->stream_size = read_32bit(offset + 0x04, sf); offset += 0x08; + //todo some dummy files have 0 size + if (read_32bit(offset + 0x00, sf) != 0x01) goto fail; /* 0x04: some kind of parent id shared by multiple Waves, or 0 */ offset += 0x08; @@ -454,6 +507,10 @@ static int parse_hx(ubi_hx_header * hx, STREAMFILE *sf, int target_subsong) { } //todo figure out CProgramResData sequences + // Format is pretty complex list of values and some offsets in between, then field names + // then more values and finally a list of linked IDs Links are the same as in the index, + // but doesn't seem to be a straight sequence list. Seems it can be used for other config too. + /* identify all possible names so unknown platforms fail */ if (strcmp(class_name, "CEventResData") == 0 || /* play/stop/etc event */ strcmp(class_name, "CProgramResData") == 0 || /* some kind of map/object-like config to make sequences in some cases? */ diff --git a/Frameworks/vgmstream/vgmstream/src/meta/xwb.c b/Frameworks/vgmstream/vgmstream/src/meta/xwb.c index faa9237aa..244218609 100644 --- a/Frameworks/vgmstream/vgmstream/src/meta/xwb.c +++ b/Frameworks/vgmstream/vgmstream/src/meta/xwb.c @@ -643,20 +643,71 @@ fail: return 0; } +static int get_wbh_name(char* buf, size_t maxsize, int target_subsong, xwb_header* xwb, STREAMFILE* sf) { + int selected_stream = target_subsong - 1; + int version, name_count; + off_t offset, name_number; + + if (read_32bitBE(0x00, sf) != 0x57424844) /* "WBHD" */ + goto fail; + version = read_32bitLE(0x04, sf); + if (version != 1) + goto fail; + name_count = read_32bitLE(0x08, sf); + + if (selected_stream > name_count) + goto fail; + + /* next table: + * - 0x00: wave id? 
(ordered from 0 to N) + * - 0x04: always 0 */ + offset = 0x10 + 0x08 * name_count; + + name_number = 0; + while (offset < get_streamfile_size(sf)) { + size_t name_len = read_string(buf, maxsize, offset, sf) + 1; + + if (name_len == 0) + goto fail; + if (name_number == selected_stream) + break; + + name_number++; + offset += name_len; + } + + return 1; +fail: + return 0; +} + static void get_name(char * buf, size_t maxsize, int target_subsong, xwb_header * xwb, STREAMFILE *streamXwb) { - STREAMFILE *streamXsb = NULL; + STREAMFILE *sf_name = NULL; int name_found; /* try to get the stream name in the .xwb, though they are very rarely included */ name_found = get_xwb_name(buf, maxsize, target_subsong, xwb, streamXwb); if (name_found) return; - /* try again in a companion .xsb file, a comically complex cue format */ - streamXsb = open_xsb_filename_pair(streamXwb); - if (!streamXsb) return; /* not all xwb have xsb though */ + /* try again in a companion files */ + + if (xwb->version == 1) { + /* .wbh, a simple name container */ + sf_name = open_streamfile_by_ext(streamXwb, "wbh"); + if (!sf_name) return; /* rarely found [Pac-Man World 2 (Xbox)] */ + + name_found = get_wbh_name(buf, maxsize, target_subsong, xwb, sf_name); + close_streamfile(sf_name); + } + else { + /* .xsb, a comically complex cue format */ + sf_name = open_xsb_filename_pair(streamXwb); + if (!sf_name) return; /* not all xwb have xsb though */ + + name_found = get_xsb_name(buf, maxsize, target_subsong, xwb, sf_name); + close_streamfile(sf_name); + } - name_found = get_xsb_name(buf, maxsize, target_subsong, xwb, streamXsb); - close_streamfile(streamXsb); if (!name_found) { buf[0] = '\0'; diff --git a/Frameworks/vgmstream/vgmstream/src/vgmstream.c b/Frameworks/vgmstream/vgmstream/src/vgmstream.c index b72235224..233472ea0 100644 --- a/Frameworks/vgmstream/vgmstream/src/vgmstream.c +++ b/Frameworks/vgmstream/vgmstream/src/vgmstream.c @@ -1095,6 +1095,11 @@ void render_vgmstream(sample_t * buffer, int32_t sample_count, VGMSTREAM * vgmst /* Get the number of samples of a single frame (smallest self-contained sample group, 1/N channels) */ int get_vgmstream_samples_per_frame(VGMSTREAM * vgmstream) { + /* Value returned here is the max (or less) that vgmstream will ask a decoder per + * "decode_x" call. Decoders with variable samples per frame or internal discard + * may return 0 here and handle arbitrary samples_to_do values internally + * (or some internal sample buffer max too). 
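+ * (The FFmpeg case below now does exactly that and returns 0: the reworked
+ * ffmpeg_codec_data keeps its own packet/frame state and discard counters,
+ * so it can service whatever samples_to_do the caller requests per call.)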
*/ + switch (vgmstream->coding_type) { case coding_CRI_ADX: case coding_CRI_ADX_fixed: @@ -1241,14 +1246,7 @@ int get_vgmstream_samples_per_frame(VGMSTREAM * vgmstream) { #endif #ifdef VGM_USE_FFMPEG case coding_FFmpeg: - if (vgmstream->codec_data) { - ffmpeg_codec_data *data = (ffmpeg_codec_data*)vgmstream->codec_data; - return data->sampleBufferBlock; /* must know the full block size for edge loops */ - } - else { - return 0; - } - break; + return 0; #endif case coding_MTAF: return 128*2; @@ -1495,37 +1493,15 @@ void decode_vgmstream(VGMSTREAM * vgmstream, int samples_written, int samples_to switch (vgmstream->coding_type) { case coding_CRI_ADX: - for (ch = 0; ch < vgmstream->channels; ch++) { - decode_adx(&vgmstream->ch[ch],buffer+samples_written*vgmstream->channels+ch, - vgmstream->channels,vgmstream->samples_into_block,samples_to_do, - vgmstream->interleave_block_size); - } - - break; case coding_CRI_ADX_exp: - for (ch = 0; ch < vgmstream->channels; ch++) { - decode_adx_exp(&vgmstream->ch[ch],buffer+samples_written*vgmstream->channels+ch, - vgmstream->channels,vgmstream->samples_into_block,samples_to_do, - vgmstream->interleave_block_size); - } - - break; case coding_CRI_ADX_fixed: - for (ch = 0; ch < vgmstream->channels; ch++) { - decode_adx_fixed(&vgmstream->ch[ch],buffer+samples_written*vgmstream->channels+ch, - vgmstream->channels,vgmstream->samples_into_block,samples_to_do, - vgmstream->interleave_block_size); - } - - break; case coding_CRI_ADX_enc_8: case coding_CRI_ADX_enc_9: for (ch = 0; ch < vgmstream->channels; ch++) { - decode_adx_enc(&vgmstream->ch[ch],buffer+samples_written*vgmstream->channels+ch, + decode_adx(&vgmstream->ch[ch],buffer+samples_written*vgmstream->channels+ch, vgmstream->channels,vgmstream->samples_into_block,samples_to_do, - vgmstream->interleave_block_size); + vgmstream->interleave_block_size, vgmstream->coding_type); } - break; case coding_NGC_DSP: for (ch = 0; ch < vgmstream->channels; ch++) { @@ -2417,7 +2393,7 @@ void describe_vgmstream(VGMSTREAM * vgmstream, char * desc, int length) { } /* codecs with configurable frame size */ - if (vgmstream->layout_type == layout_none && vgmstream->interleave_block_size > 0) { + if (vgmstream->interleave_block_size > 0) { switch (vgmstream->coding_type) { case coding_MSADPCM: case coding_MSADPCM_int: @@ -2813,6 +2789,23 @@ int vgmstream_open_stream(VGMSTREAM * vgmstream, STREAMFILE *streamFile, off_t s return 1; #endif + if ((vgmstream->coding_type == coding_PSX_cfg || + vgmstream->coding_type == coding_PSX_pivotal) && + (vgmstream->interleave_block_size == 0 || vgmstream->interleave_block_size > 0x50)) { + VGM_LOG("VGMSTREAM: PSX-cfg decoder with wrong frame size %x\n", vgmstream->interleave_block_size); + return 0; + } + + if ((vgmstream->coding_type == coding_CRI_ADX || + vgmstream->coding_type == coding_CRI_ADX_enc_8 || + vgmstream->coding_type == coding_CRI_ADX_enc_9 || + vgmstream->coding_type == coding_CRI_ADX_exp || + vgmstream->coding_type == coding_CRI_ADX_fixed) && + (vgmstream->interleave_block_size == 0 || vgmstream->interleave_block_size > 0x12)) { + VGM_LOG("VGMSTREAM: ADX decoder with wrong frame size %x\n", vgmstream->interleave_block_size); + return 0; + } + /* if interleave is big enough keep a buffer per channel */ if (vgmstream->interleave_block_size * vgmstream->channels >= STREAMFILE_DEFAULT_BUFFER_SIZE) { use_streamfile_per_channel = 1; diff --git a/Frameworks/vgmstream/vgmstream/src/vgmstream.h b/Frameworks/vgmstream/vgmstream/src/vgmstream.h index 536374fe5..b06ae0cc7 100644 --- 
a/Frameworks/vgmstream/vgmstream/src/vgmstream.h +++ b/Frameworks/vgmstream/vgmstream/src/vgmstream.h @@ -409,7 +409,7 @@ typedef enum { meta_DC_STR, /* SEGA Stream Asset Builder */ meta_DC_STR_V2, /* variant of SEGA Stream Asset Builder */ meta_NGC_BH2PCM, /* Bio Hazard 2 */ - meta_SAT_SAP, /* Bubble Symphony */ + meta_SAP, meta_DC_IDVI, /* Eldorado Gate */ meta_KRAW, /* Geometry Wars - Galaxies */ meta_PS2_OMU, /* PS2 Int file with Header */ @@ -1188,33 +1188,27 @@ typedef struct { uint64_t logical_size; // computed size FFmpeg sees (including fake header) uint64_t header_size; // fake header (parseable by FFmpeg) prepended on reads - uint8_t *header_insert_block; // fake header data (ie. RIFF) + uint8_t* header_block; // fake header data (ie. RIFF) /*** "public" API (read-only) ***/ // stream info int channels; - int bitsPerSample; - int floatingPoint; int sampleRate; int bitrate; // extra info: 0 if unknown or not fixed int64_t totalSamples; // estimated count (may not be accurate for some demuxers) - int64_t blockAlign; // coded block of bytes, counting channels (the block can be joint stereo) - int64_t frameSize; // decoded samples per block int64_t skipSamples; // number of start samples that will be skipped (encoder delay), for looping adjustments int streamCount; // number of FFmpeg audio streams /*** internal state ***/ // config int channel_remap_set; - int channel_remap[32]; /* map of channel > new position */ - int invert_audio_set; + int channel_remap[32]; /* map of channel > new position */ + int invert_floats_set; + int skip_samples_set; /* flag to know skip samples were manually added from vgmstream */ + int force_seek; /* flags for special seeking in faulty formats */ + int bad_init; - // intermediate byte buffer - uint8_t *sampleBuffer; - // max samples we can held (can be less or more than frameSize) - size_t sampleBufferBlock; - // FFmpeg context used for metadata AVCodec *codec; @@ -1224,20 +1218,17 @@ typedef struct { int streamIndex; AVFormatContext *formatCtx; AVCodecContext *codecCtx; - AVFrame *lastDecodedFrame; - AVPacket *lastReadPacket; - int bytesConsumedFromDecodedFrame; - int readNextPacket; - int endOfStream; - int endOfAudio; - int skipSamplesSet; // flag to know skip samples were manually added from vgmstream - - // Seeking is not ideal, so rollback is necessary - int samplesToDiscard; + AVFrame *frame; /* last decoded frame */ + AVPacket *packet; /* last read data packet */ - // Flags for special seeking in faulty formats - int force_seek; - int bad_init; + int read_packet; + int end_of_stream; + int end_of_audio; + + /* sample state */ + int32_t samples_discard; + int32_t samples_consumed; + int32_t samples_filled; } ffmpeg_codec_data; #endif diff --git a/Plugins/FFMPEG/FFMPEG.xcodeproj/project.pbxproj b/Plugins/FFMPEG/FFMPEG.xcodeproj/project.pbxproj index bc336770b..635b91e51 100644 --- a/Plugins/FFMPEG/FFMPEG.xcodeproj/project.pbxproj +++ b/Plugins/FFMPEG/FFMPEG.xcodeproj/project.pbxproj @@ -20,6 +20,7 @@ 8352D49D1CDDB8C0009D16AA /* CoreMedia.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8352D49C1CDDB8C0009D16AA /* CoreMedia.framework */; }; 8352D49F1CDDB8D7009D16AA /* CoreVideo.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8352D49E1CDDB8D7009D16AA /* CoreVideo.framework */; }; 83CD428C1F7878A0000F77BE /* libswresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 83CD428B1F78789F000F77BE /* libswresample.a */; }; + 83D2F5892356B210007646ED /* libopus.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 
83D2F5882356B210007646ED /* libopus.a */; }; 8D5B49B4048680CD000E48DA /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7ADFEA557BF11CA2CBB /* Cocoa.framework */; }; B09E942F0D747F410064F138 /* FFMPEGDecoder.m in Sources */ = {isa = PBXBuildFile; fileRef = B09E942E0D747F410064F138 /* FFMPEGDecoder.m */; }; /* End PBXBuildFile section */ @@ -43,6 +44,8 @@ 8352D49E1CDDB8D7009D16AA /* CoreVideo.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreVideo.framework; path = System/Library/Frameworks/CoreVideo.framework; sourceTree = SDKROOT; }; 8384913818081F6C00E7332D /* Logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Logging.h; path = ../../Utils/Logging.h; sourceTree = ""; }; 83CD428B1F78789F000F77BE /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswresample.a; path = ../../ThirdParty/ffmpeg/lib/libswresample.a; sourceTree = ""; }; + 83D2F5862356B1BE007646ED /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libopus.a; path = ../../../../../../../usr/local/Cellar/opus/1.3.1/lib/libopus.a; sourceTree = ""; }; + 83D2F5882356B210007646ED /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libopus.a; path = ../../ThirdParty/ffmpeg/lib/libopus.a; sourceTree = ""; }; 8D5B49B6048680CD000E48DA /* FFMPEG.bundle */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = FFMPEG.bundle; sourceTree = BUILT_PRODUCTS_DIR; }; 8D5B49B7048680CD000E48DA /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; B09E942D0D747F410064F138 /* FFMPEGDecoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FFMPEGDecoder.h; sourceTree = ""; }; @@ -66,6 +69,7 @@ 8352D49B1CDDB8B2009D16AA /* VideoToolbox.framework in Frameworks */, 8352D48F1CDDB023009D16AA /* CoreFoundation.framework in Frameworks */, 8D5B49B4048680CD000E48DA /* Cocoa.framework in Frameworks */, + 83D2F5892356B210007646ED /* libopus.a in Frameworks */, 8352D48C1CDDAEDD009D16AA /* libavformat.a in Frameworks */, 8352D48D1CDDAEDD009D16AA /* libavutil.a in Frameworks */, 8352D48B1CDDAEDD009D16AA /* libavcodec.a in Frameworks */, @@ -86,6 +90,7 @@ 089C1671FE841209C02AAC07 /* Frameworks and Libraries */, 19C28FB8FE9D52D311CA2CBB /* Products */, B09E95F50D74A3ED0064F138 /* Frameworks-Info.plist */, + 83D2F5852356B1BD007646ED /* Frameworks */, ); name = FFMPEG; sourceTree = ""; @@ -165,6 +170,15 @@ name = "Other Sources"; sourceTree = ""; }; + 83D2F5852356B1BD007646ED /* Frameworks */ = { + isa = PBXGroup; + children = ( + 83D2F5882356B210007646ED /* libopus.a */, + 83D2F5862356B1BE007646ED /* libopus.a */, + ); + name = Frameworks; + sourceTree = ""; + }; /* End PBXGroup section */ /* Begin PBXNativeTarget section */ @@ -252,6 +266,10 @@ GCC_PREFIX_HEADER = FFMPEG_Prefix.pch; INFOPLIST_FILE = Info.plist; INSTALL_PATH = "$(HOME)/Library/Bundles"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + /usr/local/Cellar/opus/1.3.1/lib, + ); PRODUCT_BUNDLE_IDENTIFIER = org.cogx.ffmpeg; PRODUCT_NAME = FFMPEG; SDKROOT = macosx; @@ -272,6 +290,10 @@ GCC_PREFIX_HEADER = FFMPEG_Prefix.pch; INFOPLIST_FILE = Info.plist; INSTALL_PATH = "$(HOME)/Library/Bundles"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + /usr/local/Cellar/opus/1.3.1/lib, + ); PRODUCT_BUNDLE_IDENTIFIER = org.cogx.ffmpeg; PRODUCT_NAME = FFMPEG; 
SDKROOT = macosx; diff --git a/Plugins/HighlyComplete/HighlyComplete.xcodeproj/project.pbxproj b/Plugins/HighlyComplete/HighlyComplete.xcodeproj/project.pbxproj index ddd6f580b..97556165b 100644 --- a/Plugins/HighlyComplete/HighlyComplete.xcodeproj/project.pbxproj +++ b/Plugins/HighlyComplete/HighlyComplete.xcodeproj/project.pbxproj @@ -21,10 +21,10 @@ 8384904B180764C200E7332D /* SSEQPlayer.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83848FEC1807624000E7332D /* SSEQPlayer.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; }; 83CA2E3D1D7BCF9B00F2EA53 /* mGBA.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83CA24241D7BC47E00F2EA53 /* mGBA.framework */; }; 83CA2E4D1D7BE41300F2EA53 /* mGBA.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83CA24241D7BC47E00F2EA53 /* mGBA.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; }; - 83DA153620F6EFD00096D348 /* lazyusf.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83DA153320F6EFA80096D348 /* lazyusf.framework */; }; - 83DA153720F6EFE10096D348 /* lazyusf.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83DA153320F6EFA80096D348 /* lazyusf.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; }; 83DE0CBC180B02CC00269051 /* vio2sf.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83DE0C3A180A9BD500269051 /* vio2sf.framework */; }; 83DE0CBD180B02D800269051 /* vio2sf.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83DE0C3A180A9BD500269051 /* vio2sf.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; }; + 83E2F4D123566BC5006F7A41 /* lazyusf2.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83E2F4CE23566B0C006F7A41 /* lazyusf2.framework */; }; + 83E2F4D223566BD5006F7A41 /* lazyusf2.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83E2F4CE23566B0C006F7A41 /* lazyusf2.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; }; 83FC32C51BF5AEFB00962B36 /* HighlyExperimental.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83FC325E1BF5AB9000962B36 /* HighlyExperimental.framework */; }; 83FC32C61BF5AF0600962B36 /* HighlyExperimental.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83FC325E1BF5AB9000962B36 /* HighlyExperimental.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; }; /* End PBXBuildFile section */ @@ -107,20 +107,6 @@ remoteGlobalIDString = 83CA24121D7BC47C00F2EA53; remoteInfo = mGBA; }; - 83DA153220F6EFA80096D348 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 83DA152E20F6EFA80096D348 /* lazyusf.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 83C8B62218AF57770071B040; - remoteInfo = lazyusf; - }; - 83DA153420F6EFC90096D348 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 83DA152E20F6EFA80096D348 /* lazyusf.xcodeproj */; - proxyType = 1; - remoteGlobalIDString = 83C8B62118AF57770071B040; - remoteInfo = lazyusf; - }; 83DE0C39180A9BD500269051 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 83DE0C34180A9BD400269051 /* vio2sf.xcodeproj */; @@ -135,6 +121,20 @@ remoteGlobalIDString = 83DE0C05180A9BD400269051; remoteInfo = vio2sf; }; + 83E2F4CD23566B0C006F7A41 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 83E2F4C923566B0C006F7A41 /* lazyusf2.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 83C8B62218AF57770071B040; + remoteInfo = lazyusf2; + }; + 
83E2F4CF23566BB7006F7A41 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 83E2F4C923566B0C006F7A41 /* lazyusf2.xcodeproj */; + proxyType = 1; + remoteGlobalIDString = 83C8B62118AF57770071B040; + remoteInfo = lazyusf2; + }; 83FC325D1BF5AB9000962B36 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 83FC32591BF5AB9000962B36 /* HighlyExperimental.xcodeproj */; @@ -167,7 +167,7 @@ dstPath = ""; dstSubfolderSpec = 10; files = ( - 83DA153720F6EFE10096D348 /* lazyusf.framework in CopyFiles */, + 83E2F4D223566BD5006F7A41 /* lazyusf2.framework in CopyFiles */, 83CA2E4D1D7BE41300F2EA53 /* mGBA.framework in CopyFiles */, 83FC32C61BF5AF0600962B36 /* HighlyExperimental.framework in CopyFiles */, 83DE0CBD180B02D800269051 /* vio2sf.framework in CopyFiles */, @@ -200,8 +200,8 @@ 8360EEF317F92AC8005208A4 /* HighlyComplete-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "HighlyComplete-Prefix.pch"; sourceTree = ""; }; 83848FE61807623F00E7332D /* SSEQPlayer.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = SSEQPlayer.xcodeproj; path = ../../Frameworks/SSEQPlayer/SSEQPlayer.xcodeproj; sourceTree = ""; }; 83CA241E1D7BC47C00F2EA53 /* mGBA.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = mGBA.xcodeproj; path = ../../Frameworks/mGBA/mGBA.xcodeproj; sourceTree = ""; }; - 83DA152E20F6EFA80096D348 /* lazyusf.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = lazyusf.xcodeproj; path = ../../Frameworks/lazyusf/lazyusf.xcodeproj; sourceTree = ""; }; 83DE0C34180A9BD400269051 /* vio2sf.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = vio2sf.xcodeproj; path = ../../Frameworks/vio2sf/vio2sf.xcodeproj; sourceTree = ""; }; + 83E2F4C923566B0C006F7A41 /* lazyusf2.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = lazyusf2.xcodeproj; path = ../../Frameworks/lazyusf2/lazyusf2.xcodeproj; sourceTree = ""; }; 83FAF8A318ADD27F00057CAF /* PlaylistController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = PlaylistController.h; path = ../../../Playlist/PlaylistController.h; sourceTree = ""; }; 83FC32591BF5AB9000962B36 /* HighlyExperimental.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = HighlyExperimental.xcodeproj; path = ../../Frameworks/HighlyExperimental/HighlyExperimental.xcodeproj; sourceTree = ""; }; /* End PBXFileReference section */ @@ -211,7 +211,7 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 83DA153620F6EFD00096D348 /* lazyusf.framework in Frameworks */, + 83E2F4D123566BC5006F7A41 /* lazyusf2.framework in Frameworks */, 83CA2E3D1D7BCF9B00F2EA53 /* mGBA.framework in Frameworks */, 83FC32C51BF5AEFB00962B36 /* HighlyExperimental.framework in Frameworks */, 83DE0CBC180B02CC00269051 /* vio2sf.framework in Frameworks */, @@ -279,6 +279,7 @@ 8360EEE617F92AC8005208A4 /* Frameworks */ = { isa = PBXGroup; children = ( + 83E2F4C923566B0C006F7A41 /* lazyusf2.xcodeproj */, 8333B6731DCC4999004C140D /* libz.tbd */, 83CA241E1D7BC47C00F2EA53 /* mGBA.xcodeproj */, 83FC32591BF5AB9000962B36 /* HighlyExperimental.xcodeproj */, @@ -290,7 +291,6 @@ 8343796317F97BDB00584396 /* HighlyAdvanced.xcodeproj */, 83848FE61807623F00E7332D /* SSEQPlayer.xcodeproj */, 83DE0C34180A9BD400269051 /* vio2sf.xcodeproj */, - 83DA152E20F6EFA80096D348 /* 
lazyusf.xcodeproj */, ); name = Frameworks; sourceTree = ""; @@ -344,14 +344,6 @@ name = Products; sourceTree = ""; }; - 83DA152F20F6EFA80096D348 /* Products */ = { - isa = PBXGroup; - children = ( - 83DA153320F6EFA80096D348 /* lazyusf.framework */, - ); - name = Products; - sourceTree = ""; - }; 83DE0C35180A9BD400269051 /* Products */ = { isa = PBXGroup; children = ( @@ -360,6 +352,14 @@ name = Products; sourceTree = ""; }; + 83E2F4CA23566B0C006F7A41 /* Products */ = { + isa = PBXGroup; + children = ( + 83E2F4CE23566B0C006F7A41 /* lazyusf2.framework */, + ); + name = Products; + sourceTree = ""; + }; 83FC325A1BF5AB9000962B36 /* Products */ = { isa = PBXGroup; children = ( @@ -384,7 +384,7 @@ buildRules = ( ); dependencies = ( - 83DA153520F6EFC90096D348 /* PBXTargetDependency */, + 83E2F4D023566BB7006F7A41 /* PBXTargetDependency */, 83CA2E3F1D7BCFB000F2EA53 /* PBXTargetDependency */, 83FC32C21BF5AEF300962B36 /* PBXTargetDependency */, 83DE0CBB180B02C500269051 /* PBXTargetDependency */, @@ -443,8 +443,8 @@ ProjectRef = 8343789C17F9658E00584396 /* HighlyTheoretical.xcodeproj */; }, { - ProductGroup = 83DA152F20F6EFA80096D348 /* Products */; - ProjectRef = 83DA152E20F6EFA80096D348 /* lazyusf.xcodeproj */; + ProductGroup = 83E2F4CA23566B0C006F7A41 /* Products */; + ProjectRef = 83E2F4C923566B0C006F7A41 /* lazyusf2.xcodeproj */; }, { ProductGroup = 83CA241F1D7BC47C00F2EA53 /* Products */; @@ -513,13 +513,6 @@ remoteRef = 83CA24231D7BC47E00F2EA53 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; - 83DA153320F6EFA80096D348 /* lazyusf.framework */ = { - isa = PBXReferenceProxy; - fileType = wrapper.framework; - path = lazyusf.framework; - remoteRef = 83DA153220F6EFA80096D348 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; 83DE0C3A180A9BD500269051 /* vio2sf.framework */ = { isa = PBXReferenceProxy; fileType = wrapper.framework; @@ -527,6 +520,13 @@ remoteRef = 83DE0C39180A9BD500269051 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; + 83E2F4CE23566B0C006F7A41 /* lazyusf2.framework */ = { + isa = PBXReferenceProxy; + fileType = wrapper.framework; + path = lazyusf2.framework; + remoteRef = 83E2F4CD23566B0C006F7A41 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; 83FC325E1BF5AB9000962B36 /* HighlyExperimental.framework */ = { isa = PBXReferenceProxy; fileType = wrapper.framework; @@ -584,16 +584,16 @@ name = mGBA; targetProxy = 83CA2E3E1D7BCFB000F2EA53 /* PBXContainerItemProxy */; }; - 83DA153520F6EFC90096D348 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - name = lazyusf; - targetProxy = 83DA153420F6EFC90096D348 /* PBXContainerItemProxy */; - }; 83DE0CBB180B02C500269051 /* PBXTargetDependency */ = { isa = PBXTargetDependency; name = vio2sf; targetProxy = 83DE0CBA180B02C500269051 /* PBXContainerItemProxy */; }; + 83E2F4D023566BB7006F7A41 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = lazyusf2; + targetProxy = 83E2F4CF23566BB7006F7A41 /* PBXContainerItemProxy */; + }; 83FC32C21BF5AEF300962B36 /* PBXTargetDependency */ = { isa = PBXTargetDependency; name = HighlyExperimental; diff --git a/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm b/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm index 455626f04..11494cd24 100644 --- a/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm +++ b/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm @@ -33,7 +33,7 @@ #import -#import +#import #include diff --git a/Plugins/vgmstream/vgmstream.xcodeproj/project.pbxproj 
b/Plugins/vgmstream/vgmstream.xcodeproj/project.pbxproj index ce12e89fd..86fd5d550 100644 --- a/Plugins/vgmstream/vgmstream.xcodeproj/project.pbxproj +++ b/Plugins/vgmstream/vgmstream.xcodeproj/project.pbxproj @@ -8,6 +8,7 @@ /* Begin PBXBuildFile section */ 8340888E1F6F604C00DCD404 /* VGMMetadataReader.m in Sources */ = {isa = PBXBuildFile; fileRef = 8340888B1F6F604A00DCD404 /* VGMMetadataReader.m */; }; + 835D2420235AB319009A1251 /* VGMPropertiesReader.m in Sources */ = {isa = PBXBuildFile; fileRef = 835D241E235AB318009A1251 /* VGMPropertiesReader.m */; }; 836F6B1418BDB80D0095E648 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 836F6B1318BDB80D0095E648 /* Cocoa.framework */; }; 836F6B1E18BDB80D0095E648 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 836F6B1C18BDB80D0095E648 /* InfoPlist.strings */; }; 836F705C18BDC40E0095E648 /* VGMDecoder.m in Sources */ = {isa = PBXBuildFile; fileRef = 836F705B18BDC40E0095E648 /* VGMDecoder.m */; }; @@ -51,6 +52,8 @@ 833F68491CDBCAC000AFB9F0 /* es */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = es; path = es.lproj/InfoPlist.strings; sourceTree = ""; }; 8340888B1F6F604A00DCD404 /* VGMMetadataReader.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = VGMMetadataReader.m; sourceTree = ""; }; 8340888D1F6F604B00DCD404 /* VGMMetadataReader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VGMMetadataReader.h; sourceTree = ""; }; + 835D241E235AB318009A1251 /* VGMPropertiesReader.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = VGMPropertiesReader.m; sourceTree = ""; }; + 835D241F235AB319009A1251 /* VGMPropertiesReader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VGMPropertiesReader.h; sourceTree = ""; }; 836F6B1018BDB80D0095E648 /* vgmstream.bundle */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = vgmstream.bundle; sourceTree = BUILT_PRODUCTS_DIR; }; 836F6B1318BDB80D0095E648 /* Cocoa.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Cocoa.framework; path = System/Library/Frameworks/Cocoa.framework; sourceTree = SDKROOT; }; 836F6B1618BDB80D0095E648 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; }; @@ -124,6 +127,8 @@ 836F6B1918BDB80D0095E648 /* vgmstream */ = { isa = PBXGroup; children = ( + 835D241F235AB319009A1251 /* VGMPropertiesReader.h */, + 835D241E235AB318009A1251 /* VGMPropertiesReader.m */, 8340888D1F6F604B00DCD404 /* VGMMetadataReader.h */, 8340888B1F6F604A00DCD404 /* VGMMetadataReader.m */, 83AA5D2C1F6E30080020821C /* VGMContainer.h */, @@ -248,6 +253,7 @@ 8340888E1F6F604C00DCD404 /* VGMMetadataReader.m in Sources */, 83AA5D2D1F6E30080020821C /* VGMInterface.m in Sources */, 83AA5D2E1F6E30080020821C /* VGMContainer.m in Sources */, + 835D2420235AB319009A1251 /* VGMPropertiesReader.m in Sources */, 836F705C18BDC40E0095E648 /* VGMDecoder.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; diff --git a/Plugins/vgmstream/vgmstream/VGMDecoder.h b/Plugins/vgmstream/vgmstream/VGMDecoder.h index c7c206e1e..1b2d086ca 100644 --- a/Plugins/vgmstream/vgmstream/VGMDecoder.h +++ b/Plugins/vgmstream/vgmstream/VGMDecoder.h @@ -13,6 +13,18 @@ #import "Plugin.h" +@interface 
VGMInfoCache : NSObject { + NSMutableDictionary *storage; +} + ++(id)sharedCache; + +-(void)stuffURL:(NSURL *)url stream:(VGMSTREAM *)stream; +-(NSDictionary*)getPropertiesForURL:(NSURL *)url; +-(NSDictionary*)getMetadataForURL:(NSURL *)url; + +@end + @interface VGMDecoder : NSObject { VGMSTREAM *stream; diff --git a/Plugins/vgmstream/vgmstream/VGMDecoder.m b/Plugins/vgmstream/vgmstream/VGMDecoder.m index 8faad1562..27a0ea1ce 100644 --- a/Plugins/vgmstream/vgmstream/VGMDecoder.m +++ b/Plugins/vgmstream/vgmstream/VGMDecoder.m @@ -11,6 +11,101 @@ #import "PlaylistController.h" +@implementation VGMInfoCache + ++(id)sharedCache { + static VGMInfoCache *sharedMyCache = nil; + static dispatch_once_t onceToken; + dispatch_once(&onceToken, ^{ + sharedMyCache = [[self alloc] init]; + }); + return sharedMyCache; +} + +-(id)init { + if (self = [super init]) { + storage = [[NSMutableDictionary alloc] init]; + } + return self; +} + +-(void)stuffURL:(NSURL *)url stream:(VGMSTREAM *)stream { + int track_num = [[url fragment] intValue]; + + int sampleRate = stream->sample_rate; + int channels = stream->channels; + long totalFrames = get_vgmstream_play_samples( 2.0, 10.0, 10.0, stream ); + long framesFade = stream->loop_flag ? sampleRate * 10 : 0; + long framesLength = totalFrames - framesFade; + + int bitrate = get_vgmstream_average_bitrate(stream); + + NSDictionary * properties = + [NSDictionary dictionaryWithObjectsAndKeys: + [NSNumber numberWithInt:bitrate / 1000], @"bitrate", + [NSNumber numberWithInt:sampleRate], @"sampleRate", + [NSNumber numberWithDouble:totalFrames], @"totalFrames", + [NSNumber numberWithInt:16], @"bitsPerSample", + [NSNumber numberWithBool:NO], @"floatingPoint", + [NSNumber numberWithInt:channels], @"channels", + [NSNumber numberWithBool:YES], @"seekable", + @"host", @"endian", + nil]; + + NSString * title; + + if ( stream->num_streams > 1 ) { + title = [NSString stringWithFormat:@"%@ - %s", [[url URLByDeletingPathExtension] lastPathComponent], stream->stream_name]; + } else { + title = [[url URLByDeletingPathExtension] lastPathComponent]; + } + + NSDictionary * metadata = + [NSDictionary dictionaryWithObjectsAndKeys: + title, @"title", + [NSNumber numberWithInt:track_num], @"track", + nil]; + + NSDictionary * package = + [NSDictionary dictionaryWithObjectsAndKeys: + properties, @"properties", + metadata, @"metadata", + nil]; + + @synchronized (self) { + [storage setValue:package forKey:[url absoluteString]]; + } +} + +-(NSDictionary*)getPropertiesForURL:(NSURL *)url { + NSDictionary *properties = nil; + + @synchronized (self) { + NSDictionary * package = [storage objectForKey:[url absoluteString]]; + if (package) { + properties = [package objectForKey:@"properties"]; + } + } + + return properties; +} + +-(NSDictionary*)getMetadataForURL:(NSURL *)url { + NSDictionary *metadata = nil; + + @synchronized (self) { + NSDictionary * package = [storage objectForKey:[url absoluteString]]; + if (package) { + metadata = [package objectForKey:@"metadata"]; + } + } + + return metadata; +} + +@end + + @implementation VGMDecoder - (BOOL)open:(id)s @@ -22,6 +117,8 @@ if (fragmentRange.location != NSNotFound) { path = [path substringToIndex:fragmentRange.location]; } + + NSLog(@"Opening %@ subsong %d", path, track_num); stream = init_vgmstream_from_cogfile([[path stringByReplacingPercentEscapesUsingEncoding:NSUTF8StringEncoding] UTF8String], track_num); if ( !stream ) @@ -45,16 +142,16 @@ - (NSDictionary *)properties { - return [NSDictionary dictionaryWithObjectsAndKeys: - [NSNumber 
numberWithInt:bitrate / 1000], @"bitrate", - [NSNumber numberWithInt:sampleRate], @"sampleRate", - [NSNumber numberWithDouble:totalFrames], @"totalFrames", - [NSNumber numberWithInt:16], @"bitsPerSample", - [NSNumber numberWithBool:NO], @"floatingPoint", - [NSNumber numberWithInt:channels], @"channels", - [NSNumber numberWithBool:YES], @"seekable", - @"host", @"endian", - nil]; + return [NSDictionary dictionaryWithObjectsAndKeys: + [NSNumber numberWithInt:bitrate / 1000], @"bitrate", + [NSNumber numberWithInt:sampleRate], @"sampleRate", + [NSNumber numberWithDouble:totalFrames], @"totalFrames", + [NSNumber numberWithInt:16], @"bitsPerSample", + [NSNumber numberWithBool:NO], @"floatingPoint", + [NSNumber numberWithInt:channels], @"channels", + [NSNumber numberWithBool:YES], @"seekable", + @"host", @"endian", + nil]; } - (int)readAudio:(void *)buf frames:(UInt32)frames diff --git a/Plugins/vgmstream/vgmstream/VGMMetadataReader.m b/Plugins/vgmstream/vgmstream/VGMMetadataReader.m index e3c2bcc96..d607b0176 100644 --- a/Plugins/vgmstream/vgmstream/VGMMetadataReader.m +++ b/Plugins/vgmstream/vgmstream/VGMMetadataReader.m @@ -29,30 +29,31 @@ + (NSDictionary *)metadataForURL:(NSURL *)url { - int track_num = [[url fragment] intValue]; + VGMInfoCache * sharedMyCache = [VGMInfoCache sharedCache]; - NSString * path = [url absoluteString]; - NSRange fragmentRange = [path rangeOfString:@"#" options:NSBackwardsSearch]; - if (fragmentRange.location != NSNotFound) { - path = [path substringToIndex:fragmentRange.location]; + NSDictionary * metadata = [sharedMyCache getMetadataForURL:url]; + + if (!metadata) { + int track_num = [[url fragment] intValue]; + + NSString * path = [url absoluteString]; + NSRange fragmentRange = [path rangeOfString:@"#" options:NSBackwardsSearch]; + if (fragmentRange.location != NSNotFound) { + path = [path substringToIndex:fragmentRange.location]; + } + + VGMSTREAM * stream = init_vgmstream_from_cogfile([[path stringByReplacingPercentEscapesUsingEncoding:NSUTF8StringEncoding] UTF8String], track_num); + if ( !stream ) + return nil; + + [sharedMyCache stuffURL:url stream:stream]; + + close_vgmstream(stream); + + metadata = [sharedMyCache getMetadataForURL:url]; } - VGMSTREAM * stream = init_vgmstream_from_cogfile([[path stringByReplacingPercentEscapesUsingEncoding:NSUTF8StringEncoding] UTF8String], track_num); - if ( !stream ) - return nil; - - NSString * title; - - if ( stream->num_streams > 1 ) { - title = [NSString stringWithFormat:@"%@ - %s", [[url URLByDeletingPathExtension] lastPathComponent], stream->stream_name]; - } else { - title = [[url URLByDeletingPathExtension] lastPathComponent]; - } - - return [NSDictionary dictionaryWithObjectsAndKeys: - title, @"title", - [NSNumber numberWithInt:track_num], @"track", - nil]; + return metadata; } @end diff --git a/Plugins/vgmstream/vgmstream/VGMPropertiesReader.h b/Plugins/vgmstream/vgmstream/VGMPropertiesReader.h new file mode 100644 index 000000000..36c02e316 --- /dev/null +++ b/Plugins/vgmstream/vgmstream/VGMPropertiesReader.h @@ -0,0 +1,17 @@ +// +// VGMPropertiesReader.h +// VGMStream +// +// Created by Christopher Snowhill on 10/18/19. +// Copyright 2019 __LoSnoCo__. All rights reserved. 
+// + +#import + +#import "Plugin.h" + +@interface VGMPropertiesReader : NSObject { + +} + +@end diff --git a/Plugins/vgmstream/vgmstream/VGMPropertiesReader.m b/Plugins/vgmstream/vgmstream/VGMPropertiesReader.m new file mode 100644 index 000000000..bf5fcb9f0 --- /dev/null +++ b/Plugins/vgmstream/vgmstream/VGMPropertiesReader.m @@ -0,0 +1,61 @@ +// +// VGMPropertiesReader.m +// VGMStream +// +// Created by Christopher Snowhill on 10/18/19. +// Copyright 2019 __LoSnoCo__. All rights reserved. +// + +#import "VGMPropertiesReader.h" +#import "VGMDecoder.h" +#import "VGMInterface.h" + +@implementation VGMPropertiesReader + ++ (NSArray *)fileTypes +{ + return [VGMDecoder fileTypes]; +} + ++ (NSArray *)mimeTypes +{ + return [VGMDecoder mimeTypes]; +} + ++ (float)priority +{ + return [VGMDecoder priority]; +} + ++ (NSDictionary *)propertiesForSource:(id)source +{ + VGMInfoCache * sharedMyCache = [VGMInfoCache sharedCache]; + + NSURL * url = [source url]; + + NSDictionary * properties = [sharedMyCache getPropertiesForURL:url]; + + if (!properties) { + int track_num = [[url fragment] intValue]; + + NSString * path = [url absoluteString]; + NSRange fragmentRange = [path rangeOfString:@"#" options:NSBackwardsSearch]; + if (fragmentRange.location != NSNotFound) { + path = [path substringToIndex:fragmentRange.location]; + } + + VGMSTREAM * stream = init_vgmstream_from_cogfile([[path stringByReplacingPercentEscapesUsingEncoding:NSUTF8StringEncoding] UTF8String], track_num); + if ( !stream ) + return nil; + + [sharedMyCache stuffURL:url stream:stream]; + + close_vgmstream(stream); + + properties = [sharedMyCache getPropertiesForURL:url]; + } + + return properties; +} + +@end diff --git a/Preferences/General/General.xcodeproj/project.pbxproj b/Preferences/General/General.xcodeproj/project.pbxproj index 8b86fc611..1c1b41985 100644 --- a/Preferences/General/General.xcodeproj/project.pbxproj +++ b/Preferences/General/General.xcodeproj/project.pbxproj @@ -19,6 +19,8 @@ 17E41DB80C130AA500AC744D /* Localizable.strings in Resources */ = {isa = PBXBuildFile; fileRef = 17E41DB70C130AA500AC744D /* Localizable.strings */; }; 17E78A7E0D68BE3C005C5A59 /* file_tree.png in Resources */ = {isa = PBXBuildFile; fileRef = 17E78A7D0D68BE3C005C5A59 /* file_tree.png */; }; 17E78B6A0D68C1E3005C5A59 /* Preferences.xib in Resources */ = {isa = PBXBuildFile; fileRef = 17E78B680D68C1E3005C5A59 /* Preferences.xib */; }; + 3DFAC490235B6B8100A29416 /* DEVELOPMENT_TEAM.xcconfig in Resources */ = {isa = PBXBuildFile; fileRef = 3DFAC48E235B6B8100A29416 /* DEVELOPMENT_TEAM.xcconfig */; }; + 3DFAC491235B6B8100A29416 /* Shared.xcconfig in Resources */ = {isa = PBXBuildFile; fileRef = 3DFAC48F235B6B8100A29416 /* Shared.xcconfig */; }; 8372053718E3DEAF007EFAD4 /* ResamplerBehaviorArrayController.m in Sources */ = {isa = PBXBuildFile; fileRef = 8372053618E3DEAF007EFAD4 /* ResamplerBehaviorArrayController.m */; }; 837C0D401C50954000CAE18F /* MIDIPluginBehaviorArrayController.m in Sources */ = {isa = PBXBuildFile; fileRef = 837C0D3F1C50954000CAE18F /* MIDIPluginBehaviorArrayController.m */; }; 8384917718084D9F00E7332D /* appearance.png in Resources */ = {isa = PBXBuildFile; fileRef = 8384917518084D9F00E7332D /* appearance.png */; }; @@ -95,6 +97,8 @@ 17D1B3F60F6349CE00694C57 /* PreferencePanePlugin.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = PreferencePanePlugin.h; path = ../PreferencePanePlugin.h; sourceTree = SOURCE_ROOT; }; 17E78A7D0D68BE3C005C5A59 /* file_tree.png */ = {isa = PBXFileReference; 
lastKnownFileType = image.png; name = file_tree.png; path = Icons/file_tree.png; sourceTree = ""; }; 32DBCF630370AF2F00C91783 /* General_Prefix.pch */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = General_Prefix.pch; sourceTree = ""; }; + 3DFAC48E235B6B8100A29416 /* DEVELOPMENT_TEAM.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = DEVELOPMENT_TEAM.xcconfig; sourceTree = ""; }; + 3DFAC48F235B6B8100A29416 /* Shared.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Shared.xcconfig; sourceTree = ""; }; 833F681B1CDBCAA700AFB9F0 /* es */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.strings; name = es; path = es.lproj/InfoPlist.strings; sourceTree = ""; }; 833F681C1CDBCAA700AFB9F0 /* es */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = es; path = es.lproj/Localizable.strings; sourceTree = ""; }; 8347435D20E6D58800063D45 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/Preferences.strings; sourceTree = ""; }; @@ -165,6 +169,7 @@ 32C88E010371C26100C91783 /* Other Sources */, 089C167CFE841241C02AAC07 /* Resources */, 089C1671FE841209C02AAC07 /* Frameworks and Libraries */, + 3DFAC48D235B6B8100A29416 /* Xcode-config */, 19C28FB8FE9D52D311CA2CBB /* Products */, ); name = General; @@ -279,6 +284,16 @@ name = "Other Sources"; sourceTree = ""; }; + 3DFAC48D235B6B8100A29416 /* Xcode-config */ = { + isa = PBXGroup; + children = ( + 3DFAC48E235B6B8100A29416 /* DEVELOPMENT_TEAM.xcconfig */, + 3DFAC48F235B6B8100A29416 /* Shared.xcconfig */, + ); + name = "Xcode-config"; + path = "../../Xcode-config"; + sourceTree = ""; + }; 838491801808588D00E7332D /* Products */ = { isa = PBXGroup; children = ( @@ -353,7 +368,6 @@ LastUpgradeCheck = 1020; TargetAttributes = { 8D5B49AC048680CD000E48DA = { - DevelopmentTeam = N6E749HJ2X; ProvisioningStyle = Automatic; }; }; @@ -409,6 +423,8 @@ 178E386E0C3DA64500EE6711 /* InfoPlist.strings in Resources */, 8E07ABDD0AAC95BC00A4B32F /* hot_keys.png in Resources */, 172D72AD0B8926CA00D095BB /* apple_remote.png in Resources */, + 3DFAC491235B6B8100A29416 /* Shared.xcconfig in Resources */, + 3DFAC490235B6B8100A29416 /* DEVELOPMENT_TEAM.xcconfig in Resources */, 8384917818084D9F00E7332D /* growl.png in Resources */, 8E15A86C0B894768006DC802 /* updates.png in Resources */, 8384917718084D9F00E7332D /* appearance.png in Resources */, @@ -500,7 +516,6 @@ CODE_SIGN_STYLE = Automatic; COMBINE_HIDPI_IMAGES = YES; COPY_PHASE_STRIP = NO; - DEVELOPMENT_TEAM = ""; GCC_DYNAMIC_NO_PIC = NO; GCC_MODEL_TUNING = G5; GCC_OPTIMIZATION_LEVEL = 0; @@ -523,7 +538,6 @@ CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; COMBINE_HIDPI_IMAGES = YES; - DEVELOPMENT_TEAM = ""; GCC_MODEL_TUNING = G5; GCC_PRECOMPILE_PREFIX_HEADER = YES; GCC_PREFIX_HEADER = General_Prefix.pch; @@ -539,6 +553,7 @@ }; 1DEB913F08733D840010E9CD /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = 3DFAC48F235B6B8100A29416 /* Shared.xcconfig */; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; @@ -584,6 +599,7 @@ }; 1DEB914008733D840010E9CD /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = 3DFAC48F235B6B8100A29416 /* Shared.xcconfig */; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; diff --git a/Scripts/ffmpeg-build.sh 
b/Scripts/ffmpeg-build.sh index c61eec635..4f48dc2f9 100755 --- a/Scripts/ffmpeg-build.sh +++ b/Scripts/ffmpeg-build.sh @@ -7,11 +7,12 @@ --disable-swscale --disable-network --disable-swscale-alpha --disable-vdpau\ --disable-dxva2 --disable-everything --enable-hwaccels\ --enable-swresample\ + --enable-libopus\ --enable-parser=ac3,mpegaudio,xma,vorbis,opus\ --enable-demuxer=ac3,asf,xwma,mov,oma,ogg,tak,dsf,wav,aac,dts,dtshd,mp3,bink,flac,msf,xmv,caf,ape,smacker,pcm_s8,spdif,mpc,mpc8,rm\ - --enable-decoder=ac3,wmapro,wmav1,wmav2,wmavoice,wmalossless,xma1,xma2,dca,tak,dsd_lsbf,dsd_lsbf_planar,dsd_mbf,dsd_msbf_planar,aac,atrac3,atrac3p,mp3float,bink,binkaudio_dct,binkaudio_rdft,flac,pcm_s16be,pcm_s16be_planar,pcm_s16le,pcm_s16le_planar,vorbis,ape,adpcm_ima_qt,smackaud,opus,pcm_s8,pcm_s8_planar,mpc7,mpc8,cook\ + --enable-decoder=ac3,wmapro,wmav1,wmav2,wmavoice,wmalossless,xma1,xma2,dca,tak,dsd_lsbf,dsd_lsbf_planar,dsd_mbf,dsd_msbf_planar,aac,atrac3,atrac3p,mp3float,bink,binkaudio_dct,binkaudio_rdft,flac,pcm_s16be,pcm_s16be_planar,pcm_s16le,pcm_s16le_planar,vorbis,ape,adpcm_ima_qt,smackaud,libopus,pcm_s8,pcm_s8_planar,mpc7,mpc8,alac,adpcm_ima_dk3,adpcm_ima_dk4,cook\ --disable-parser=mpeg4video,h263\ --disable-decoder=mpeg2video,h263,h264,mpeg1video,mpeg2video,mpeg4,hevc,vp9\ --disable-version3 -make -j8 +make -j$(sysctl -n hw.logicalcpu) diff --git a/Scripts/update_feed.rb b/Scripts/update_feed.rb index 8855a23dc..f0c8a70d3 100755 --- a/Scripts/update_feed.rb +++ b/Scripts/update_feed.rb @@ -49,7 +49,7 @@ appcast_revision_code = appcast_revision_split[1] # latest_archive = %x[find #{archivedir} -type d -name 'Cog *.xcarchive' -print0 | xargs -0 stat -f "%m %N" -t "%Y" | sort -r | head -n1 | sed -E 's/^[0-9]+ //'].rstrip # app_path = "#{latest_archive}/Products#{ENV['HOME']}/Applications" script_path = File.expand_path(File.dirname(__FILE__)) - app_path = "#{script_path}/build/Build/Products/Release" + app_path = "#{Dir.home}/Desktop/Cog" plist = open("#{app_path}/Cog.app/Contents/Info.plist") plistdoc = Nokogiri::XML(plist) @@ -81,9 +81,6 @@ if appcast_revision < latest_revision #Copy the replacement build %x[cp -R '#{app_path}/Cog.app' '#{temp_path}/Cog.app'] - #Sign it! - %x[codesign -s 'Developer ID Application' -f '#{temp_path}/Cog.app'] - #Zip the app! 
%x[rm -f '#{temp_path}/#{feed}.zip'] %x[ditto -c -k --sequesterRsrc --keepParent --zlibCompressionLevel 9 '#{temp_path}/Cog.app' '#{temp_path}/#{feed}.zip'] diff --git a/ThirdParty/ffmpeg/include/libavcodec/avcodec.h b/ThirdParty/ffmpeg/include/libavcodec/avcodec.h index bc0eacd66..bcb931f0d 100644 --- a/ThirdParty/ffmpeg/include/libavcodec/avcodec.h +++ b/ThirdParty/ffmpeg/include/libavcodec/avcodec.h @@ -409,6 +409,7 @@ enum AVCodecID { AV_CODEC_ID_DXV, AV_CODEC_ID_SCREENPRESSO, AV_CODEC_ID_RSCC, + AV_CODEC_ID_AVS2, AV_CODEC_ID_Y41P = 0x8000, AV_CODEC_ID_AVRP, @@ -446,6 +447,17 @@ enum AVCodecID { AV_CODEC_ID_SVG, AV_CODEC_ID_GDV, AV_CODEC_ID_FITS, + AV_CODEC_ID_IMM4, + AV_CODEC_ID_PROSUMER, + AV_CODEC_ID_MWSC, + AV_CODEC_ID_WCMV, + AV_CODEC_ID_RASC, + AV_CODEC_ID_HYMT, + AV_CODEC_ID_ARBC, + AV_CODEC_ID_AGM, + AV_CODEC_ID_LSCR, + AV_CODEC_ID_VP4, + AV_CODEC_ID_IMM5, /* various PCM "codecs" */ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs @@ -485,6 +497,7 @@ enum AVCodecID { AV_CODEC_ID_PCM_S64BE, AV_CODEC_ID_PCM_F16LE, AV_CODEC_ID_PCM_F24LE, + AV_CODEC_ID_PCM_VIDC, /* various ADPCM codecs */ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, @@ -529,6 +542,7 @@ enum AVCodecID { AV_CODEC_ID_ADPCM_AICA, AV_CODEC_ID_ADPCM_IMA_DAT4, AV_CODEC_ID_ADPCM_MTAF, + AV_CODEC_ID_ADPCM_AGM, /* AMR */ AV_CODEC_ID_AMR_NB = 0x12000, @@ -615,6 +629,7 @@ enum AVCodecID { AV_CODEC_ID_PAF_AUDIO, AV_CODEC_ID_ON2AVC, AV_CODEC_ID_DSS_SP, + AV_CODEC_ID_CODEC2, AV_CODEC_ID_FFWAVESYNTH = 0x15800, AV_CODEC_ID_SONIC, @@ -635,6 +650,10 @@ enum AVCodecID { AV_CODEC_ID_DOLBY_E, AV_CODEC_ID_APTX, AV_CODEC_ID_APTX_HD, + AV_CODEC_ID_SBC, + AV_CODEC_ID_ATRAC9, + AV_CODEC_ID_HCOM, + AV_CODEC_ID_ACELP_KELVIN, /* subtitle codecs */ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. @@ -663,12 +682,15 @@ enum AVCodecID { AV_CODEC_ID_PJS, AV_CODEC_ID_ASS, AV_CODEC_ID_HDMV_TEXT_SUBTITLE, + AV_CODEC_ID_TTML, + AV_CODEC_ID_ARIB_CAPTION, /* other specific kind of codecs (generally used for attachments) */ AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. AV_CODEC_ID_TTF = 0x18000, AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream. + AV_CODEC_ID_EPG, AV_CODEC_ID_BINTEXT = 0x18800, AV_CODEC_ID_XBIN, AV_CODEC_ID_IDF, @@ -841,6 +863,11 @@ typedef struct RcOverride{ * Use qpel MC. */ #define AV_CODEC_FLAG_QPEL (1 << 4) +/** + * Don't output frames whose parameters differ from first + * decoded frame in stream. + */ +#define AV_CODEC_FLAG_DROPCHANGED (1 << 5) /** * Use internal 2pass ratecontrol in first pass mode. */ @@ -1060,6 +1087,13 @@ typedef struct RcOverride{ */ #define AV_CODEC_CAP_HYBRID (1 << 19) +/** + * This codec takes the reordered_opaque field from input AVFrames + * and returns it in the corresponding field in AVCodecContext after + * encoding. + */ +#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20) + /** * Pan Scan area. * This specifies the area which should be displayed. @@ -1099,17 +1133,29 @@ typedef struct AVCPBProperties { * Maximum bitrate of the stream, in bits per second. * Zero if unknown or unspecified. */ +#if FF_API_UNSANITIZED_BITRATES int max_bitrate; +#else + int64_t max_bitrate; +#endif /** * Minimum bitrate of the stream, in bits per second. * Zero if unknown or unspecified. */ +#if FF_API_UNSANITIZED_BITRATES int min_bitrate; +#else + int64_t min_bitrate; +#endif /** * Average bitrate of the stream, in bits per second. 
* Zero if unknown or unspecified. */ +#if FF_API_UNSANITIZED_BITRATES int avg_bitrate; +#else + int64_t avg_bitrate; +#endif /** * The size of the buffer to which the ratecontrol is applied, in bits. @@ -1310,7 +1356,7 @@ enum AVPacketSideDataType { AV_PKT_DATA_METADATA_UPDATE, /** - * MPEGTS stream ID, this is required to pass the stream ID + * MPEGTS stream ID as uint8_t, this is required to pass the stream ID * information from the demuxer to the corresponding muxer. */ AV_PKT_DATA_MPEGTS_STREAM_ID, @@ -1342,6 +1388,25 @@ enum AVPacketSideDataType { */ AV_PKT_DATA_A53_CC, + /** + * This side data is encryption initialization data. + * The format is not part of ABI, use av_encryption_init_info_* methods to + * access. + */ + AV_PKT_DATA_ENCRYPTION_INIT_INFO, + + /** + * This side data contains encryption info for how to decrypt the packet. + * The format is not part of ABI, use av_encryption_info_* methods to access. + */ + AV_PKT_DATA_ENCRYPTION_INFO, + + /** + * Active Format Description data consisting of a single byte as specified + * in ETSI TS 101 154 using AVActiveFormatDescription enum. + */ + AV_PKT_DATA_AFD, + /** * The number of side data types. * This is not part of the public API/ABI in the sense that it may @@ -1597,6 +1662,7 @@ typedef struct AVCodecContext { * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger * than extradata_size to avoid problems if it is read with the bitstream reader. * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * Must be allocated with the av_malloc() family of functions. * - encoding: Set/allocated/freed by libavcodec. * - decoding: Set/allocated/freed by user. */ @@ -1994,15 +2060,19 @@ typedef struct AVCodecContext { /** * custom intra quantization matrix - * - encoding: Set by user, can be NULL. - * - decoding: Set by libavcodec. + * Must be allocated with the av_malloc() family of functions, and will be freed in + * avcodec_free_context(). + * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL. + * - decoding: Set/allocated/freed by libavcodec. */ uint16_t *intra_matrix; /** * custom inter quantization matrix - * - encoding: Set by user, can be NULL. - * - decoding: Set by libavcodec. + * Must be allocated with the av_malloc() family of functions, and will be freed in + * avcodec_free_context(). + * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL. + * - decoding: Set/allocated/freed by libavcodec. */ uint16_t *inter_matrix; @@ -2646,7 +2716,10 @@ typedef struct AVCodecContext { /** * opaque 64-bit number (generally a PTS) that will be reordered and * output in AVFrame.reordered_opaque - * - encoding: unused + * - encoding: Set by libavcodec to the reordered_opaque of the input + * frame corresponding to the last returned packet. Only + * supported by encoders with the + * AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability. * - decoding: Set by user. 
*/ int64_t reordered_opaque; @@ -2918,6 +2991,28 @@ typedef struct AVCodecContext { #define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3 #define FF_PROFILE_HEVC_REXT 4 +#define FF_PROFILE_AV1_MAIN 0 +#define FF_PROFILE_AV1_HIGH 1 +#define FF_PROFILE_AV1_PROFESSIONAL 2 + +#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0 +#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1 +#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2 +#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3 +#define FF_PROFILE_MJPEG_JPEG_LS 0xf7 + +#define FF_PROFILE_SBC_MSBC 1 + +#define FF_PROFILE_PRORES_PROXY 0 +#define FF_PROFILE_PRORES_LT 1 +#define FF_PROFILE_PRORES_STANDARD 2 +#define FF_PROFILE_PRORES_HQ 3 +#define FF_PROFILE_PRORES_4444 4 +#define FF_PROFILE_PRORES_XQ 5 + +#define FF_PROFILE_ARIB_PROFILE_A 0 +#define FF_PROFILE_ARIB_PROFILE_C 1 + /** * level * - encoding: Set by user. @@ -3069,6 +3164,7 @@ typedef struct AVCodecContext { #define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) #define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself #define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv +#define FF_SUB_CHARENC_MODE_IGNORE 2 ///< neither convert the subtitles, nor check them for valid UTF-8 /** * Skip processing alpha if supported by codec. @@ -3269,6 +3365,22 @@ typedef struct AVCodecContext { * used as reference pictures). */ int extra_hw_frames; + + /** + * The percentage of damaged samples to discard a frame. + * + * - decoding: set by user + * - encoding: unused + */ + int discard_damaged_percentage; + + /** + * The number of samples per frame to maximally accept. + * + * - decoding: set by user + * - encoding: set by user + */ + int64_t max_samples; } AVCodecContext; #if FF_API_CODEC_GET_SET @@ -4321,7 +4433,7 @@ int av_grow_packet(AVPacket *pkt, int grow_by); * Initialize a reference-counted packet from av_malloc()ed data. * * @param pkt packet to be initialized. This function will set the data, size, - * buf and destruct fields, all others are left untouched. + * and buf fields, all others are left untouched. * @param data Data allocated by av_malloc() to be used as packet data. If this * function returns successfully, the data is owned by the underlying AVBuffer. * The caller may not access the data through other means. @@ -4337,7 +4449,7 @@ int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); * @warning This is a hack - the packet memory allocation stuff is broken. The * packet is allocated if it was not really allocated. * - * @deprecated Use av_packet_ref + * @deprecated Use av_packet_ref or av_packet_make_refcounted */ attribute_deprecated int av_dup_packet(AVPacket *pkt); @@ -4508,6 +4620,33 @@ void av_packet_move_ref(AVPacket *dst, AVPacket *src); */ int av_packet_copy_props(AVPacket *dst, const AVPacket *src); +/** + * Ensure the data described by a given packet is reference counted. + * + * @note This function does not ensure that the reference will be writable. + * Use av_packet_make_writable instead for that purpose. + * + * @see av_packet_ref + * @see av_packet_make_writable + * + * @param pkt packet whose data should be made reference counted. + * + * @return 0 on success, a negative AVERROR on error. On failure, the + * packet is unchanged. 
+ */ +int av_packet_make_refcounted(AVPacket *pkt); + +/** + * Create a writable reference for the data described by a given packet, + * avoiding data copy if possible. + * + * @param pkt Packet whose data should be made writable. + * + * @return 0 on success, a negative AVERROR on failure. On failure, the + * packet is unchanged. + */ +int av_packet_make_writable(AVPacket *pkt); + /** * Convert valid timing fields (timestamps / durations) in a packet from one * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be @@ -4800,6 +4939,9 @@ int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); * AVERROR_EOF: the decoder has been fully flushed, and there will be * no more output frames * AVERROR(EINVAL): codec not opened, or it is an encoder + * AVERROR_INPUT_CHANGED: current decoded frame has changed parameters + * with respect to first decoded frame. Applicable + * when flag AV_CODEC_FLAG_DROPCHANGED is set. * other negative values: legitimate decoding errors */ int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); @@ -5711,6 +5853,7 @@ typedef struct AVBitStreamFilter { int (*init)(AVBSFContext *ctx); int (*filter)(AVBSFContext *ctx, AVPacket *pkt); void (*close)(AVBSFContext *ctx); + void (*flush)(AVBSFContext *ctx); } AVBitStreamFilter; #if FF_API_OLD_BSF @@ -5837,6 +5980,11 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt); */ int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt); +/** + * Reset the internal bitstream filter state / flush internal buffers. + */ +void av_bsf_flush(AVBSFContext *ctx); + /** * Free a bitstream filter context and everything associated with it; write NULL * into the supplied pointer. diff --git a/ThirdParty/ffmpeg/include/libavcodec/mediacodec.h b/ThirdParty/ffmpeg/include/libavcodec/mediacodec.h index 5606d24a1..4c8545df0 100644 --- a/ThirdParty/ffmpeg/include/libavcodec/mediacodec.h +++ b/ThirdParty/ffmpeg/include/libavcodec/mediacodec.h @@ -85,4 +85,17 @@ typedef struct MediaCodecBuffer AVMediaCodecBuffer; */ int av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render); +/** + * Release a MediaCodec buffer and render it at the given time to the surface + * that is associated with the decoder. The timestamp must be within one second + * of the current java/lang/System#nanoTime() (which is implemented using + * CLOCK_MONOTONIC on Android). See the Android MediaCodec documentation + * of android/media/MediaCodec#releaseOutputBuffer(int,long) for more details. + * + * @param buffer the buffer to render + * @param time timestamp in nanoseconds of when to render the buffer + * @return 0 on success, < 0 otherwise + */ +int av_mediacodec_render_buffer_at_time(AVMediaCodecBuffer *buffer, int64_t time); + #endif /* AVCODEC_MEDIACODEC_H */ diff --git a/ThirdParty/ffmpeg/include/libavcodec/old_codec_ids.h b/ThirdParty/ffmpeg/include/libavcodec/old_codec_ids.h deleted file mode 100644 index c7aa0e0a1..000000000 --- a/ThirdParty/ffmpeg/include/libavcodec/old_codec_ids.h +++ /dev/null @@ -1,397 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
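A minimal usage sketch of the new av_packet_make_refcounted()/av_packet_make_writable() pair declared above, which replaces the deprecated av_dup_packet() pattern; fmt_ctx is assumed to be an already-opened AVFormatContext:

    AVPacket *pkt = av_packet_alloc();
    if (pkt && av_read_frame(fmt_ctx, pkt) >= 0) {
        /* Make the payload reference counted so it survives the next
         * av_read_frame() call; make it writable only if we will modify it. */
        if (av_packet_make_refcounted(pkt) >= 0 &&
            av_packet_make_writable(pkt)   >= 0 &&
            pkt->size > 0)
            pkt->data[0] ^= 0xff;   /* safe: the data is now privately owned */
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);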
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_OLD_CODEC_IDS_H -#define AVCODEC_OLD_CODEC_IDS_H - -/* - * This header exists to prevent new codec IDs from being accidentally added to - * the deprecated list. - * Do not include it directly. It will be removed on next major bump - * - * Do not add new items to this list. Use the AVCodecID enum instead. - */ - - CODEC_ID_NONE = AV_CODEC_ID_NONE, - - /* video codecs */ - CODEC_ID_MPEG1VIDEO, - CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding -#if FF_API_XVMC - CODEC_ID_MPEG2VIDEO_XVMC, -#endif - CODEC_ID_H261, - CODEC_ID_H263, - CODEC_ID_RV10, - CODEC_ID_RV20, - CODEC_ID_MJPEG, - CODEC_ID_MJPEGB, - CODEC_ID_LJPEG, - CODEC_ID_SP5X, - CODEC_ID_JPEGLS, - CODEC_ID_MPEG4, - CODEC_ID_RAWVIDEO, - CODEC_ID_MSMPEG4V1, - CODEC_ID_MSMPEG4V2, - CODEC_ID_MSMPEG4V3, - CODEC_ID_WMV1, - CODEC_ID_WMV2, - CODEC_ID_H263P, - CODEC_ID_H263I, - CODEC_ID_FLV1, - CODEC_ID_SVQ1, - CODEC_ID_SVQ3, - CODEC_ID_DVVIDEO, - CODEC_ID_HUFFYUV, - CODEC_ID_CYUV, - CODEC_ID_H264, - CODEC_ID_INDEO3, - CODEC_ID_VP3, - CODEC_ID_THEORA, - CODEC_ID_ASV1, - CODEC_ID_ASV2, - CODEC_ID_FFV1, - CODEC_ID_4XM, - CODEC_ID_VCR1, - CODEC_ID_CLJR, - CODEC_ID_MDEC, - CODEC_ID_ROQ, - CODEC_ID_INTERPLAY_VIDEO, - CODEC_ID_XAN_WC3, - CODEC_ID_XAN_WC4, - CODEC_ID_RPZA, - CODEC_ID_CINEPAK, - CODEC_ID_WS_VQA, - CODEC_ID_MSRLE, - CODEC_ID_MSVIDEO1, - CODEC_ID_IDCIN, - CODEC_ID_8BPS, - CODEC_ID_SMC, - CODEC_ID_FLIC, - CODEC_ID_TRUEMOTION1, - CODEC_ID_VMDVIDEO, - CODEC_ID_MSZH, - CODEC_ID_ZLIB, - CODEC_ID_QTRLE, - CODEC_ID_TSCC, - CODEC_ID_ULTI, - CODEC_ID_QDRAW, - CODEC_ID_VIXL, - CODEC_ID_QPEG, - CODEC_ID_PNG, - CODEC_ID_PPM, - CODEC_ID_PBM, - CODEC_ID_PGM, - CODEC_ID_PGMYUV, - CODEC_ID_PAM, - CODEC_ID_FFVHUFF, - CODEC_ID_RV30, - CODEC_ID_RV40, - CODEC_ID_VC1, - CODEC_ID_WMV3, - CODEC_ID_LOCO, - CODEC_ID_WNV1, - CODEC_ID_AASC, - CODEC_ID_INDEO2, - CODEC_ID_FRAPS, - CODEC_ID_TRUEMOTION2, - CODEC_ID_BMP, - CODEC_ID_CSCD, - CODEC_ID_MMVIDEO, - CODEC_ID_ZMBV, - CODEC_ID_AVS, - CODEC_ID_SMACKVIDEO, - CODEC_ID_NUV, - CODEC_ID_KMVC, - CODEC_ID_FLASHSV, - CODEC_ID_CAVS, - CODEC_ID_JPEG2000, - CODEC_ID_VMNC, - CODEC_ID_VP5, - CODEC_ID_VP6, - CODEC_ID_VP6F, - CODEC_ID_TARGA, - CODEC_ID_DSICINVIDEO, - CODEC_ID_TIERTEXSEQVIDEO, - CODEC_ID_TIFF, - CODEC_ID_GIF, - CODEC_ID_DXA, - CODEC_ID_DNXHD, - CODEC_ID_THP, - CODEC_ID_SGI, - CODEC_ID_C93, - CODEC_ID_BETHSOFTVID, - CODEC_ID_PTX, - CODEC_ID_TXD, - CODEC_ID_VP6A, - CODEC_ID_AMV, - CODEC_ID_VB, - CODEC_ID_PCX, - CODEC_ID_SUNRAST, - CODEC_ID_INDEO4, - CODEC_ID_INDEO5, - CODEC_ID_MIMIC, - CODEC_ID_RL2, - CODEC_ID_ESCAPE124, - CODEC_ID_DIRAC, - CODEC_ID_BFI, - CODEC_ID_CMV, - CODEC_ID_MOTIONPIXELS, - CODEC_ID_TGV, - CODEC_ID_TGQ, - CODEC_ID_TQI, - CODEC_ID_AURA, - CODEC_ID_AURA2, - CODEC_ID_V210X, - CODEC_ID_TMV, - CODEC_ID_V210, - CODEC_ID_DPX, - CODEC_ID_MAD, - CODEC_ID_FRWU, - CODEC_ID_FLASHSV2, - CODEC_ID_CDGRAPHICS, - CODEC_ID_R210, - CODEC_ID_ANM, - CODEC_ID_BINKVIDEO, - CODEC_ID_IFF_ILBM, - CODEC_ID_IFF_BYTERUN1, - CODEC_ID_KGV1, - CODEC_ID_YOP, - CODEC_ID_VP8, - CODEC_ID_PICTOR, - CODEC_ID_ANSI, - CODEC_ID_A64_MULTI, - CODEC_ID_A64_MULTI5, - CODEC_ID_R10K, - CODEC_ID_MXPEG, - CODEC_ID_LAGARITH, - CODEC_ID_PRORES, - CODEC_ID_JV, - CODEC_ID_DFA, - CODEC_ID_WMV3IMAGE, - CODEC_ID_VC1IMAGE, - 
CODEC_ID_UTVIDEO, - CODEC_ID_BMV_VIDEO, - CODEC_ID_VBLE, - CODEC_ID_DXTORY, - CODEC_ID_V410, - CODEC_ID_XWD, - CODEC_ID_CDXL, - CODEC_ID_XBM, - CODEC_ID_ZEROCODEC, - CODEC_ID_MSS1, - CODEC_ID_MSA1, - CODEC_ID_TSCC2, - CODEC_ID_MTS2, - CODEC_ID_CLLC, - CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), - CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), - CODEC_ID_EXR = MKBETAG('0','E','X','R'), - CODEC_ID_AVRP = MKBETAG('A','V','R','P'), - - CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), - CODEC_ID_AVUI = MKBETAG('A','V','U','I'), - CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), - CODEC_ID_V308 = MKBETAG('V','3','0','8'), - CODEC_ID_V408 = MKBETAG('V','4','0','8'), - CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), - CODEC_ID_SANM = MKBETAG('S','A','N','M'), - CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), - CODEC_ID_SNOW = AV_CODEC_ID_SNOW, - - /* various PCM "codecs" */ - CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs - CODEC_ID_PCM_S16LE = 0x10000, - CODEC_ID_PCM_S16BE, - CODEC_ID_PCM_U16LE, - CODEC_ID_PCM_U16BE, - CODEC_ID_PCM_S8, - CODEC_ID_PCM_U8, - CODEC_ID_PCM_MULAW, - CODEC_ID_PCM_ALAW, - CODEC_ID_PCM_S32LE, - CODEC_ID_PCM_S32BE, - CODEC_ID_PCM_U32LE, - CODEC_ID_PCM_U32BE, - CODEC_ID_PCM_S24LE, - CODEC_ID_PCM_S24BE, - CODEC_ID_PCM_U24LE, - CODEC_ID_PCM_U24BE, - CODEC_ID_PCM_S24DAUD, - CODEC_ID_PCM_ZORK, - CODEC_ID_PCM_S16LE_PLANAR, - CODEC_ID_PCM_DVD, - CODEC_ID_PCM_F32BE, - CODEC_ID_PCM_F32LE, - CODEC_ID_PCM_F64BE, - CODEC_ID_PCM_F64LE, - CODEC_ID_PCM_BLURAY, - CODEC_ID_PCM_LXF, - CODEC_ID_S302M, - CODEC_ID_PCM_S8_PLANAR, - - /* various ADPCM codecs */ - CODEC_ID_ADPCM_IMA_QT = 0x11000, - CODEC_ID_ADPCM_IMA_WAV, - CODEC_ID_ADPCM_IMA_DK3, - CODEC_ID_ADPCM_IMA_DK4, - CODEC_ID_ADPCM_IMA_WS, - CODEC_ID_ADPCM_IMA_SMJPEG, - CODEC_ID_ADPCM_MS, - CODEC_ID_ADPCM_4XM, - CODEC_ID_ADPCM_XA, - CODEC_ID_ADPCM_ADX, - CODEC_ID_ADPCM_EA, - CODEC_ID_ADPCM_G726, - CODEC_ID_ADPCM_CT, - CODEC_ID_ADPCM_SWF, - CODEC_ID_ADPCM_YAMAHA, - CODEC_ID_ADPCM_SBPRO_4, - CODEC_ID_ADPCM_SBPRO_3, - CODEC_ID_ADPCM_SBPRO_2, - CODEC_ID_ADPCM_THP, - CODEC_ID_ADPCM_IMA_AMV, - CODEC_ID_ADPCM_EA_R1, - CODEC_ID_ADPCM_EA_R3, - CODEC_ID_ADPCM_EA_R2, - CODEC_ID_ADPCM_IMA_EA_SEAD, - CODEC_ID_ADPCM_IMA_EA_EACS, - CODEC_ID_ADPCM_EA_XAS, - CODEC_ID_ADPCM_EA_MAXIS_XA, - CODEC_ID_ADPCM_IMA_ISS, - CODEC_ID_ADPCM_G722, - CODEC_ID_ADPCM_IMA_APC, - CODEC_ID_VIMA = MKBETAG('V','I','M','A'), - - /* AMR */ - CODEC_ID_AMR_NB = 0x12000, - CODEC_ID_AMR_WB, - - /* RealAudio codecs*/ - CODEC_ID_RA_144 = 0x13000, - CODEC_ID_RA_288, - - /* various DPCM codecs */ - CODEC_ID_ROQ_DPCM = 0x14000, - CODEC_ID_INTERPLAY_DPCM, - CODEC_ID_XAN_DPCM, - CODEC_ID_SOL_DPCM, - - /* audio codecs */ - CODEC_ID_MP2 = 0x15000, - CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 - CODEC_ID_AAC, - CODEC_ID_AC3, - CODEC_ID_DTS, - CODEC_ID_VORBIS, - CODEC_ID_DVAUDIO, - CODEC_ID_WMAV1, - CODEC_ID_WMAV2, - CODEC_ID_MACE3, - CODEC_ID_MACE6, - CODEC_ID_VMDAUDIO, - CODEC_ID_FLAC, - CODEC_ID_MP3ADU, - CODEC_ID_MP3ON4, - CODEC_ID_SHORTEN, - CODEC_ID_ALAC, - CODEC_ID_WESTWOOD_SND1, - CODEC_ID_GSM, ///< as in Berlin toast format - CODEC_ID_QDM2, - CODEC_ID_COOK, - CODEC_ID_TRUESPEECH, - CODEC_ID_TTA, - CODEC_ID_SMACKAUDIO, - CODEC_ID_QCELP, - CODEC_ID_WAVPACK, - CODEC_ID_DSICINAUDIO, - CODEC_ID_IMC, - CODEC_ID_MUSEPACK7, - CODEC_ID_MLP, - CODEC_ID_GSM_MS, /* as found in WAV */ - CODEC_ID_ATRAC3, - CODEC_ID_VOXWARE, - CODEC_ID_APE, - CODEC_ID_NELLYMOSER, - CODEC_ID_MUSEPACK8, - CODEC_ID_SPEEX, - CODEC_ID_WMAVOICE, - CODEC_ID_WMAPRO, - 
CODEC_ID_WMALOSSLESS, - CODEC_ID_ATRAC3P, - CODEC_ID_EAC3, - CODEC_ID_SIPR, - CODEC_ID_MP1, - CODEC_ID_TWINVQ, - CODEC_ID_TRUEHD, - CODEC_ID_MP4ALS, - CODEC_ID_ATRAC1, - CODEC_ID_BINKAUDIO_RDFT, - CODEC_ID_BINKAUDIO_DCT, - CODEC_ID_AAC_LATM, - CODEC_ID_QDMC, - CODEC_ID_CELT, - CODEC_ID_G723_1, - CODEC_ID_G729, - CODEC_ID_8SVX_EXP, - CODEC_ID_8SVX_FIB, - CODEC_ID_BMV_AUDIO, - CODEC_ID_RALF, - CODEC_ID_IAC, - CODEC_ID_ILBC, - CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), - CODEC_ID_SONIC = MKBETAG('S','O','N','C'), - CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), - CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), - CODEC_ID_OPUS = MKBETAG('O','P','U','S'), - - /* subtitle codecs */ - CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. - CODEC_ID_DVD_SUBTITLE = 0x17000, - CODEC_ID_DVB_SUBTITLE, - CODEC_ID_TEXT, ///< raw UTF-8 text - CODEC_ID_XSUB, - CODEC_ID_SSA, - CODEC_ID_MOV_TEXT, - CODEC_ID_HDMV_PGS_SUBTITLE, - CODEC_ID_DVB_TELETEXT, - CODEC_ID_SRT, - CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), - CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), - CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), - CODEC_ID_SAMI = MKBETAG('S','A','M','I'), - CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), - CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), - - /* other specific kind of codecs (generally used for attachments) */ - CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. - CODEC_ID_TTF = 0x18000, - CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), - CODEC_ID_XBIN = MKBETAG('X','B','I','N'), - CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), - CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), - - CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it - - CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS - * stream (only used by libavformat) */ - CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems - * stream (only used by libavformat) */ - CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. - -#endif /* AVCODEC_OLD_CODEC_IDS_H */ diff --git a/ThirdParty/ffmpeg/include/libavcodec/vda.h b/ThirdParty/ffmpeg/include/libavcodec/vda.h deleted file mode 100644 index bde14e31d..000000000 --- a/ThirdParty/ffmpeg/include/libavcodec/vda.h +++ /dev/null @@ -1,230 +0,0 @@ -/* - * VDA HW acceleration - * - * copyright (c) 2011 Sebastien Zwickert - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_VDA_H -#define AVCODEC_VDA_H - -/** - * @file - * @ingroup lavc_codec_hwaccel_vda - * Public libavcodec VDA header. 
- */ - -#include "libavcodec/avcodec.h" - -#include - -// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes -// http://openradar.appspot.com/8026390 -#undef __GNUC_STDC_INLINE__ - -#define Picture QuickdrawPicture -#include -#undef Picture - -#include "libavcodec/version.h" - -// extra flags not defined in VDADecoder.h -enum { - kVDADecodeInfo_Asynchronous = 1UL << 0, - kVDADecodeInfo_FrameDropped = 1UL << 1 -}; - -/** - * @defgroup lavc_codec_hwaccel_vda VDA - * @ingroup lavc_codec_hwaccel - * - * @{ - */ - -/** - * This structure is used to provide the necessary configurations and data - * to the VDA FFmpeg HWAccel implementation. - * - * The application must make it available as AVCodecContext.hwaccel_context. - */ -struct vda_context { - /** - * VDA decoder object. - * - * - encoding: unused - * - decoding: Set/Unset by libavcodec. - */ - VDADecoder decoder; - - /** - * The Core Video pixel buffer that contains the current image data. - * - * encoding: unused - * decoding: Set by libavcodec. Unset by user. - */ - CVPixelBufferRef cv_buffer; - - /** - * Use the hardware decoder in synchronous mode. - * - * encoding: unused - * decoding: Set by user. - */ - int use_sync_decoding; - - /** - * The frame width. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - int width; - - /** - * The frame height. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - int height; - - /** - * The frame format. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - int format; - - /** - * The pixel format for output image buffers. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - OSType cv_pix_fmt_type; - - /** - * unused - */ - uint8_t *priv_bitstream; - - /** - * unused - */ - int priv_bitstream_size; - - /** - * unused - */ - int priv_allocated_size; - - /** - * Use av_buffer to manage buffer. - * When the flag is set, the CVPixelBuffers returned by the decoder will - * be released automatically, so you have to retain them if necessary. - * Not setting this flag may cause memory leak. - * - * encoding: unused - * decoding: Set by user. - */ - int use_ref_buffer; -}; - -/** Create the video decoder. */ -int ff_vda_create_decoder(struct vda_context *vda_ctx, - uint8_t *extradata, - int extradata_size); - -/** Destroy the video decoder. */ -int ff_vda_destroy_decoder(struct vda_context *vda_ctx); - -/** - * This struct holds all the information that needs to be passed - * between the caller and libavcodec for initializing VDA decoding. - * Its size is not a part of the public ABI, it must be allocated with - * av_vda_alloc_context() and freed with av_free(). - */ -typedef struct AVVDAContext { - /** - * VDA decoder object. Created and freed by the caller. - */ - VDADecoder decoder; - - /** - * The output callback that must be passed to VDADecoderCreate. - * Set by av_vda_alloc_context(). - */ - VDADecoderOutputCallback output_callback; - - /** - * CVPixelBuffer Format Type that VDA will use for decoded frames; set by - * the caller. - */ - OSType cv_pix_fmt_type; -} AVVDAContext; - -/** - * Allocate and initialize a VDA context. - * - * This function should be called from the get_format() callback when the caller - * selects the AV_PIX_FMT_VDA format. The caller must then create the decoder - * object (using the output callback provided by libavcodec) that will be used - * for VDA-accelerated decoding. 
- * - * When decoding with VDA is finished, the caller must destroy the decoder - * object and free the VDA context using av_free(). - * - * @return the newly allocated context or NULL on failure - */ -AVVDAContext *av_vda_alloc_context(void); - -/** - * This is a convenience function that creates and sets up the VDA context using - * an internal implementation. - * - * @param avctx the corresponding codec context - * - * @return >= 0 on success, a negative AVERROR code on failure - */ -int av_vda_default_init(AVCodecContext *avctx); - -/** - * This is a convenience function that creates and sets up the VDA context using - * an internal implementation. - * - * @param avctx the corresponding codec context - * @param vdactx the VDA context to use - * - * @return >= 0 on success, a negative AVERROR code on failure - */ -int av_vda_default_init2(AVCodecContext *avctx, AVVDAContext *vdactx); - -/** - * This function must be called to free the VDA context initialized with - * av_vda_default_init(). - * - * @param avctx the corresponding codec context - */ -void av_vda_default_free(AVCodecContext *avctx); - -/** - * @} - */ - -#endif /* AVCODEC_VDA_H */ diff --git a/ThirdParty/ffmpeg/include/libavcodec/version.h b/ThirdParty/ffmpeg/include/libavcodec/version.h index d13dd41fa..2e047a6f5 100644 --- a/ThirdParty/ffmpeg/include/libavcodec/version.h +++ b/ThirdParty/ffmpeg/include/libavcodec/version.h @@ -28,8 +28,8 @@ #include "libavutil/version.h" #define LIBAVCODEC_VERSION_MAJOR 58 -#define LIBAVCODEC_VERSION_MINOR 11 -#define LIBAVCODEC_VERSION_MICRO 101 +#define LIBAVCODEC_VERSION_MINOR 59 +#define LIBAVCODEC_VERSION_MICRO 102 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ @@ -132,6 +132,9 @@ #ifndef FF_API_NEXT #define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59) #endif +#ifndef FF_API_UNSANITIZED_BITRATES +#define FF_API_UNSANITIZED_BITRATES (LIBAVCODEC_VERSION_MAJOR < 59) +#endif #endif /* AVCODEC_VERSION_H */ diff --git a/ThirdParty/ffmpeg/include/libavformat/avformat.h b/ThirdParty/ffmpeg/include/libavformat/avformat.h index 4ea1b5ab7..6eb329f13 100644 --- a/ThirdParty/ffmpeg/include/libavformat/avformat.h +++ b/ThirdParty/ffmpeg/include/libavformat/avformat.h @@ -36,17 +36,15 @@ * into component streams, and the reverse process of muxing - writing supplied * data in a specified container format. It also has an @ref lavf_io * "I/O module" which supports a number of protocols for accessing the data (e.g. - * file, tcp, http and others). Before using lavf, you need to call - * av_register_all() to register all compiled muxers, demuxers and protocols. + * file, tcp, http and others). * Unless you are absolutely sure you won't use libavformat's network * capabilities, you should also call avformat_network_init(). * * A supported input format is described by an AVInputFormat struct, conversely * an output format is described by AVOutputFormat. You can iterate over all - * registered input/output formats using the av_iformat_next() / - * av_oformat_next() functions. The protocols layer is not part of the public - * API, so you can only get the names of supported protocols with the - * avio_enum_protocols() function. + * input/output formats using the av_demuxer_iterate / av_muxer_iterate() functions. + * The protocols layer is not part of the public API, so you can only get the names + * of supported protocols with the avio_enum_protocols() function. 
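With av_register_all() gone, the rewritten documentation above points at the iteration API instead. A minimal sketch listing the compiled-in demuxers and input protocols (stdio is used only for illustration):

    void *opaque = NULL;
    const AVInputFormat *ifmt;
    while ((ifmt = av_demuxer_iterate(&opaque)))
        printf("demuxer: %s\n", ifmt->name);

    opaque = NULL;
    const char *proto;
    while ((proto = avio_enum_protocols(&opaque, 0)))   /* 0 selects input protocols */
        printf("protocol: %s\n", proto);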
* * Main lavf structure used for both muxing and demuxing is AVFormatContext, * which exports all information about the file being read or written. As with @@ -177,8 +175,8 @@ * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a * static storage somewhere inside the demuxer and the packet is only valid * until the next av_read_frame() call or closing the file. If the caller - * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy - * of it. + * requires a longer lifetime, av_packet_make_refcounted() will ensure that + * the data is reference counted, copying the data if necessary. * In both cases, the packet must be freed with av_packet_unref() when it is no * longer needed. * @@ -532,7 +530,16 @@ typedef struct AVOutputFormat { * New public fields should be added right above. ***************************************************************** */ - struct AVOutputFormat *next; + /** + * The ff_const59 define is not part of the public API and will + * be removed without further warning. + */ +#if FF_API_AVIOFORMAT +#define ff_const59 +#else +#define ff_const59 const +#endif + ff_const59 struct AVOutputFormat *next; /** * size of private data so that it can be allocated in the wrapper */ @@ -646,7 +653,7 @@ typedef struct AVInputFormat { /** * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, - * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOTIMESTAMPS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. */ int flags; @@ -676,7 +683,7 @@ typedef struct AVInputFormat { * New public fields should be added right above. ***************************************************************** */ - struct AVInputFormat *next; + ff_const59 struct AVInputFormat *next; /** * Raw demuxers store their codec ID here. @@ -693,7 +700,7 @@ typedef struct AVInputFormat { * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes * big so you do not have to check for that unless you need more. */ - int (*read_probe)(AVProbeData *); + int (*read_probe)(const AVProbeData *); /** * Read the format header and initialize the AVFormatContext @@ -845,6 +852,8 @@ typedef struct AVStreamInternal AVStreamInternal; #define AV_DISPOSITION_CAPTIONS 0x10000 #define AV_DISPOSITION_DESCRIPTIONS 0x20000 #define AV_DISPOSITION_METADATA 0x40000 +#define AV_DISPOSITION_DEPENDENT 0x80000 ///< dependent audio stream (mix_type=0 in mpegts) +#define AV_DISPOSITION_STILL_IMAGE 0x100000 ///< still images in video stream (still_picture_flag=1 in mpegts) /** * Options for behavior on timestamp wrap detection. @@ -1101,6 +1110,13 @@ typedef struct AVStream { */ int stream_identifier; + /** + * Details of the MPEG-TS program which created this stream. + */ + int program_num; + int pmt_version; + int pmt_stream_idx; + int64_t interleaver_chunk_size; int64_t interleaver_chunk_duration; @@ -1258,6 +1274,7 @@ typedef struct AVProgram { int program_num; int pmt_pid; int pcr_pid; + int pmt_version; /***************************************************************** * All fields below this line are not part of the public API. They @@ -1336,14 +1353,14 @@ typedef struct AVFormatContext { * * Demuxing only, set by avformat_open_input(). */ - struct AVInputFormat *iformat; + ff_const59 struct AVInputFormat *iformat; /** * The output container format. * * Muxing only, must be set by the caller before avformat_write_header(). 
*/ - struct AVOutputFormat *oformat; + ff_const59 struct AVOutputFormat *oformat; /** * Format private data. This is an AVOptions-enabled struct @@ -1473,7 +1490,9 @@ typedef struct AVFormatContext { * This flag is mainly intended for testing. */ #define AVFMT_FLAG_BITEXACT 0x0400 -#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload +#if FF_API_LAVF_MP4A_LATM +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Deprecated, does nothing. +#endif #define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) #define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) #if FF_API_LAVF_KEEPSIDE_FLAG @@ -1925,6 +1944,13 @@ typedef struct AVFormatContext { * - decoding: set by user */ int max_streams; + + /** + * Skip duration calcuation in estimate_timings_from_pts. + * - encoding: unused + * - decoding: set by user + */ + int skip_estimate_duration_from_pts; } AVFormatContext; #if FF_API_FORMAT_GET_SET @@ -2192,7 +2218,7 @@ AVProgram *av_new_program(AVFormatContext *s, int id); * @return >= 0 in case of success, a negative AVERROR code in case of * failure */ -int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, +int avformat_alloc_output_context2(AVFormatContext **ctx, ff_const59 AVOutputFormat *oformat, const char *format_name, const char *filename); /** @@ -2203,7 +2229,7 @@ int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oforma /** * Find AVInputFormat based on the short name of the input format. */ -AVInputFormat *av_find_input_format(const char *short_name); +ff_const59 AVInputFormat *av_find_input_format(const char *short_name); /** * Guess the file format. @@ -2212,7 +2238,7 @@ AVInputFormat *av_find_input_format(const char *short_name); * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. */ -AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); +ff_const59 AVInputFormat *av_probe_input_format(ff_const59 AVProbeData *pd, int is_opened); /** * Guess the file format. @@ -2226,7 +2252,7 @@ AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended * to retry with a larger probe buffer. */ -AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); +ff_const59 AVInputFormat *av_probe_input_format2(ff_const59 AVProbeData *pd, int is_opened, int *score_max); /** * Guess the file format. @@ -2235,7 +2261,7 @@ AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score * demuxers with or without AVFMT_NOFILE are probed. * @param score_ret The score of the best detection. */ -AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret); +ff_const59 AVInputFormat *av_probe_input_format3(ff_const59 AVProbeData *pd, int is_opened, int *score_ret); /** * Probe a bytestream to determine the input format. 
Each time a probe returns @@ -2253,14 +2279,14 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score * the maximal score is AVPROBE_SCORE_MAX * AVERROR code otherwise */ -int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, +int av_probe_input_buffer2(AVIOContext *pb, ff_const59 AVInputFormat **fmt, const char *url, void *logctx, unsigned int offset, unsigned int max_probe_size); /** * Like av_probe_input_buffer2() but returns 0 on success */ -int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, +int av_probe_input_buffer(AVIOContext *pb, ff_const59 AVInputFormat **fmt, const char *url, void *logctx, unsigned int offset, unsigned int max_probe_size); @@ -2283,7 +2309,7 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, * * @note If you want to use custom IO, preallocate the format context and set its pb field. */ -int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options); +int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options); attribute_deprecated int av_demuxer_open(AVFormatContext *ic); @@ -2668,14 +2694,14 @@ int av_write_trailer(AVFormatContext *s); * @param mime_type if non-NULL checks if mime_type matches with the * MIME type of the registered formats */ -AVOutputFormat *av_guess_format(const char *short_name, +ff_const59 AVOutputFormat *av_guess_format(const char *short_name, const char *filename, const char *mime_type); /** * Guess the codec ID based upon muxer and filename. */ -enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, +enum AVCodecID av_guess_codec(ff_const59 AVOutputFormat *fmt, const char *short_name, const char *filename, const char *mime_type, enum AVMediaType type); diff --git a/ThirdParty/ffmpeg/include/libavformat/avio.h b/ThirdParty/ffmpeg/include/libavformat/avio.h index 75912ce6b..9141642e7 100644 --- a/ThirdParty/ffmpeg/include/libavformat/avio.h +++ b/ThirdParty/ffmpeg/include/libavformat/avio.h @@ -236,7 +236,7 @@ typedef struct AVIOContext { int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); int64_t (*seek)(void *opaque, int64_t offset, int whence); int64_t pos; /**< position in the file of the current buffer */ - int eof_reached; /**< true if eof reached */ + int eof_reached; /**< true if was unable to read due to error or eof */ int write_flag; /**< true if open for writing */ int max_packet_size; unsigned long checksum; @@ -566,14 +566,34 @@ static av_always_inline int64_t avio_tell(AVIOContext *s) int64_t avio_size(AVIOContext *s); /** - * feof() equivalent for AVIOContext. - * @return non zero if and only if end of file + * Similar to feof() but also returns nonzero on read errors. + * @return non zero if and only if at end of file or a read error happened when reading. */ int avio_feof(AVIOContext *s); -/** @warning Writes up to 4 KiB per call */ +/** + * Writes a formatted string to the context. + * @return number of bytes written, < 0 on error. + */ int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); +/** + * Write a NULL terminated array of strings to the context. + * Usually you don't need to use this function directly but its macro wrapper, + * avio_print. + */ +void avio_print_string_array(AVIOContext *s, const char *strings[]); + +/** + * Write strings (const char *) to the context. 
+ * This is a convenience macro around avio_print_string_array and it + * automatically creates the string array from the variable argument list. + * For simple string concatenations this function is more performant than using + * avio_printf since it does not need a temporary buffer. + */ +#define avio_print(s, ...) \ + avio_print_string_array(s, (const char*[]){__VA_ARGS__, NULL}) + /** * Force flushing of buffered data. * diff --git a/ThirdParty/ffmpeg/include/libavformat/version.h b/ThirdParty/ffmpeg/include/libavformat/version.h index 009f98109..bcd0408d2 100644 --- a/ThirdParty/ffmpeg/include/libavformat/version.h +++ b/ThirdParty/ffmpeg/include/libavformat/version.h @@ -32,7 +32,7 @@ // Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) // Also please add any ticket numbers that you believe might be affected here #define LIBAVFORMAT_VERSION_MAJOR 58 -#define LIBAVFORMAT_VERSION_MINOR 9 +#define LIBAVFORMAT_VERSION_MINOR 33 #define LIBAVFORMAT_VERSION_MICRO 100 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ @@ -70,6 +70,9 @@ #ifndef FF_API_HLS_WRAP #define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 59) #endif +#ifndef FF_API_HLS_USE_LOCALTIME +#define FF_API_HLS_USE_LOCALTIME (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif #ifndef FF_API_LAVF_KEEPSIDE_FLAG #define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 59) #endif @@ -94,6 +97,15 @@ #ifndef FF_API_NEXT #define FF_API_NEXT (LIBAVFORMAT_VERSION_MAJOR < 59) #endif +#ifndef FF_API_DASH_MIN_SEG_DURATION +#define FF_API_DASH_MIN_SEG_DURATION (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_MP4A_LATM +#define FF_API_LAVF_MP4A_LATM (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVIOFORMAT +#define FF_API_AVIOFORMAT (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif #ifndef FF_API_R_FRAME_RATE diff --git a/ThirdParty/ffmpeg/include/libavutil/audioconvert.h b/ThirdParty/ffmpeg/include/libavutil/audioconvert.h deleted file mode 100644 index 300a67cd3..000000000 --- a/ThirdParty/ffmpeg/include/libavutil/audioconvert.h +++ /dev/null @@ -1,6 +0,0 @@ - -#include "version.h" - -#if FF_API_AUDIOCONVERT -#include "channel_layout.h" -#endif diff --git a/ThirdParty/ffmpeg/include/libavutil/avassert.h b/ThirdParty/ffmpeg/include/libavutil/avassert.h index 46f3fea58..9abeadea4 100644 --- a/ThirdParty/ffmpeg/include/libavutil/avassert.h +++ b/ThirdParty/ffmpeg/include/libavutil/avassert.h @@ -66,7 +66,7 @@ #endif /** - * Assert that floating point opperations can be executed. + * Assert that floating point operations can be executed. * * This will av_assert0() that the cpu is not in MMX state on X86 */ diff --git a/ThirdParty/ffmpeg/include/libavutil/avstring.h b/ThirdParty/ffmpeg/include/libavutil/avstring.h index 04d269564..274335cfb 100644 --- a/ThirdParty/ffmpeg/include/libavutil/avstring.h +++ b/ThirdParty/ffmpeg/include/libavutil/avstring.h @@ -274,16 +274,21 @@ char *av_strireplace(const char *str, const char *from, const char *to); /** * Thread safe basename. - * @param path the path, on DOS both \ and / are considered separators. + * @param path the string to parse, on DOS both \ and / are considered separators. * @return pointer to the basename substring. + * If path does not contain a slash, the function returns a copy of path. + * If path is a NULL pointer or points to an empty string, a pointer + * to a string "." is returned. */ const char *av_basename(const char *path); /** * Thread safe dirname. 
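A minimal sketch of the new avio_print() convenience macro defined above; pb is assumed to be an AVIOContext already opened for writing, and fmt_name is any const char * string:

    /* Concatenates the string arguments directly into the context, without the
     * temporary buffer that an equivalent avio_printf() call would need. */
    avio_print(pb, "format: ", fmt_name, "\n");
    avio_flush(pb);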
- * @param path the path, on DOS both \ and / are considered separators. - * @return the path with the separator replaced by the string terminator or ".". - * @note the function may change the input string. + * @param path the string to parse, on DOS both \ and / are considered separators. + * @return A pointer to a string that's the parent directory of path. + * If path is a NULL pointer or points to an empty string, a pointer + * to a string "." is returned. + * @note the function may modify the contents of the path, so copies should be passed. */ const char *av_dirname(char *path); @@ -400,6 +405,12 @@ int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, */ int av_match_list(const char *name, const char *list, char separator); +/** + * See libc sscanf manual for more information. + * Locale-independent sscanf implementation. + */ +int av_sscanf(const char *string, const char *format, ...); + /** * @} */ diff --git a/ThirdParty/ffmpeg/include/libavutil/common.h b/ThirdParty/ffmpeg/include/libavutil/common.h index 0fffa6771..f09f0b486 100644 --- a/ThirdParty/ffmpeg/include/libavutil/common.h +++ b/ThirdParty/ffmpeg/include/libavutil/common.h @@ -53,7 +53,7 @@ //rounded division & shift #define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) /* assume b>0 */ -#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) +#define ROUNDED_DIV(a,b) (((a)>=0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) /* Fast a/(1<=0 and b>=0 */ #define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \ : ((a) + (1<<(b)) - 1) >> (b)) @@ -228,7 +228,7 @@ static av_always_inline av_const int av_clip_intp2_c(int a, int p) */ static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) { - if (a & ~((1<> 31 & ((1<> 31 & ((1< +#include + +typedef struct AVSubsampleEncryptionInfo { + /** The number of bytes that are clear. */ + unsigned int bytes_of_clear_data; + + /** + * The number of bytes that are protected. If using pattern encryption, + * the pattern applies to only the protected bytes; if not using pattern + * encryption, all these bytes are encrypted. + */ + unsigned int bytes_of_protected_data; +} AVSubsampleEncryptionInfo; + +/** + * This describes encryption info for a packet. This contains frame-specific + * info for how to decrypt the packet before passing it to the decoder. + * + * The size of this struct is not part of the public ABI. + */ +typedef struct AVEncryptionInfo { + /** The fourcc encryption scheme, in big-endian byte order. */ + uint32_t scheme; + + /** + * Only used for pattern encryption. This is the number of 16-byte blocks + * that are encrypted. + */ + uint32_t crypt_byte_block; + + /** + * Only used for pattern encryption. This is the number of 16-byte blocks + * that are clear. + */ + uint32_t skip_byte_block; + + /** + * The ID of the key used to encrypt the packet. This should always be + * 16 bytes long, but may be changed in the future. + */ + uint8_t *key_id; + uint32_t key_id_size; + + /** + * The initialization vector. This may have been zero-filled to be the + * correct block size. This should always be 16 bytes long, but may be + * changed in the future. + */ + uint8_t *iv; + uint32_t iv_size; + + /** + * An array of subsample encryption info specifying how parts of the sample + * are encrypted. If there are no subsamples, then the whole sample is + * encrypted. 
+ */ + AVSubsampleEncryptionInfo *subsamples; + uint32_t subsample_count; +} AVEncryptionInfo; + +/** + * This describes info used to initialize an encryption key system. + * + * The size of this struct is not part of the public ABI. + */ +typedef struct AVEncryptionInitInfo { + /** + * A unique identifier for the key system this is for, can be NULL if it + * is not known. This should always be 16 bytes, but may change in the + * future. + */ + uint8_t* system_id; + uint32_t system_id_size; + + /** + * An array of key IDs this initialization data is for. All IDs are the + * same length. Can be NULL if there are no known key IDs. + */ + uint8_t** key_ids; + /** The number of key IDs. */ + uint32_t num_key_ids; + /** + * The number of bytes in each key ID. This should always be 16, but may + * change in the future. + */ + uint32_t key_id_size; + + /** + * Key-system specific initialization data. This data is copied directly + * from the file and the format depends on the specific key system. This + * can be NULL if there is no initialization data; in that case, there + * will be at least one key ID. + */ + uint8_t* data; + uint32_t data_size; + + /** + * An optional pointer to the next initialization info in the list. + */ + struct AVEncryptionInitInfo *next; +} AVEncryptionInitInfo; + +/** + * Allocates an AVEncryptionInfo structure and sub-pointers to hold the given + * number of subsamples. This will allocate pointers for the key ID, IV, + * and subsample entries, set the size members, and zero-initialize the rest. + * + * @param subsample_count The number of subsamples. + * @param key_id_size The number of bytes in the key ID, should be 16. + * @param iv_size The number of bytes in the IV, should be 16. + * + * @return The new AVEncryptionInfo structure, or NULL on error. + */ +AVEncryptionInfo *av_encryption_info_alloc(uint32_t subsample_count, uint32_t key_id_size, uint32_t iv_size); + +/** + * Allocates an AVEncryptionInfo structure with a copy of the given data. + * @return The new AVEncryptionInfo structure, or NULL on error. + */ +AVEncryptionInfo *av_encryption_info_clone(const AVEncryptionInfo *info); + +/** + * Frees the given encryption info object. This MUST NOT be used to free the + * side-data data pointer, that should use normal side-data methods. + */ +void av_encryption_info_free(AVEncryptionInfo *info); + +/** + * Creates a copy of the AVEncryptionInfo that is contained in the given side + * data. The resulting object should be passed to av_encryption_info_free() + * when done. + * + * @return The new AVEncryptionInfo structure, or NULL on error. + */ +AVEncryptionInfo *av_encryption_info_get_side_data(const uint8_t *side_data, size_t side_data_size); + +/** + * Allocates and initializes side data that holds a copy of the given encryption + * info. The resulting pointer should be either freed using av_free or given + * to av_packet_add_side_data(). + * + * @return The new side-data pointer, or NULL. + */ +uint8_t *av_encryption_info_add_side_data( + const AVEncryptionInfo *info, size_t *side_data_size); + + +/** + * Allocates an AVEncryptionInitInfo structure and sub-pointers to hold the + * given sizes. This will allocate pointers and set all the fields. + * + * @return The new AVEncryptionInitInfo structure, or NULL on error. + */ +AVEncryptionInitInfo *av_encryption_init_info_alloc( + uint32_t system_id_size, uint32_t num_key_ids, uint32_t key_id_size, uint32_t data_size); + +/** + * Frees the given encryption init info object. 
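A minimal sketch of reading the per-packet encryption side data described above; pkt is assumed to come from av_read_frame() on an encrypted stream, and the side-data pointer itself is left untouched as the comments require:

    int sd_size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_ENCRYPTION_INFO, &sd_size);
    if (sd) {
        AVEncryptionInfo *info = av_encryption_info_get_side_data(sd, sd_size);
        if (info) {
            /* info->scheme, info->key_id, info->iv and info->subsamples describe
             * how to decrypt this packet before it reaches the decoder. */
            av_encryption_info_free(info);
        }
    }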
This MUST NOT be used to free + * the side-data data pointer, that should use normal side-data methods. + */ +void av_encryption_init_info_free(AVEncryptionInitInfo* info); + +/** + * Creates a copy of the AVEncryptionInitInfo that is contained in the given + * side data. The resulting object should be passed to + * av_encryption_init_info_free() when done. + * + * @return The new AVEncryptionInitInfo structure, or NULL on error. + */ +AVEncryptionInitInfo *av_encryption_init_info_get_side_data( + const uint8_t* side_data, size_t side_data_size); + +/** + * Allocates and initializes side data that holds a copy of the given encryption + * init info. The resulting pointer should be either freed using av_free or + * given to av_packet_add_side_data(). + * + * @return The new side-data pointer, or NULL. + */ +uint8_t *av_encryption_init_info_add_side_data( + const AVEncryptionInitInfo *info, size_t *side_data_size); + +#endif /* AVUTIL_ENCRYPTION_INFO_H */ diff --git a/ThirdParty/ffmpeg/include/libavutil/ffversion.h b/ThirdParty/ffmpeg/include/libavutil/ffversion.h index 1792886a7..37f3abfce 100644 --- a/ThirdParty/ffmpeg/include/libavutil/ffversion.h +++ b/ThirdParty/ffmpeg/include/libavutil/ffversion.h @@ -1,5 +1,5 @@ /* Automatically generated by version.sh, do not manually edit! */ #ifndef AVUTIL_FFVERSION_H #define AVUTIL_FFVERSION_H -#define FFMPEG_VERSION "N-90075-g647fa49495" +#define FFMPEG_VERSION "N-95393-g29dac2927f" #endif /* AVUTIL_FFVERSION_H */ diff --git a/ThirdParty/ffmpeg/include/libavutil/file.h b/ThirdParty/ffmpeg/include/libavutil/file.h index 8666c7b1d..3ef4a6022 100644 --- a/ThirdParty/ffmpeg/include/libavutil/file.h +++ b/ThirdParty/ffmpeg/include/libavutil/file.h @@ -33,6 +33,8 @@ * allocated buffer or map it with mmap() when available. * In case of success set *bufptr to the read or mmapped buffer, and * *size to the size in bytes of the buffer in *bufptr. + * Unlike mmap this function succeeds with zero sized files, in this + * case *bufptr will be set to NULL and *size will be set to 0. * The returned buffer must be released with av_file_unmap(). * * @param log_offset loglevel offset used for logging diff --git a/ThirdParty/ffmpeg/include/libavutil/frame.h b/ThirdParty/ffmpeg/include/libavutil/frame.h index d54bd9a35..5d3231e7b 100644 --- a/ThirdParty/ffmpeg/include/libavutil/frame.h +++ b/ThirdParty/ffmpeg/include/libavutil/frame.h @@ -141,6 +141,44 @@ enum AVFrameSideDataType { * metadata key entry "name". */ AV_FRAME_DATA_ICC_PROFILE, + +#if FF_API_FRAME_QP + /** + * Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA. + * The contents of this side data are undocumented and internal; use + * av_frame_set_qp_table() and av_frame_get_qp_table() to access this in a + * meaningful way instead. + */ + AV_FRAME_DATA_QP_TABLE_PROPERTIES, + + /** + * Raw QP table data. Its format is described by + * AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use av_frame_set_qp_table() and + * av_frame_get_qp_table() to access this instead. + */ + AV_FRAME_DATA_QP_TABLE_DATA, +#endif + + /** + * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t + * where the first uint32_t describes how many (1-3) of the other timecodes are used. + * The timecode format is described in the av_timecode_get_smpte_from_framenum() + * function in libavutil/timecode.c. + */ + AV_FRAME_DATA_S12M_TIMECODE, + + /** + * HDR dynamic metadata associated with a video frame. 
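A minimal sketch of the av_file_map() behaviour noted in the libavutil/file.h hunk above (zero-sized files now succeed with a NULL buffer); the filename is a placeholder:

    uint8_t *buf  = NULL;
    size_t   size = 0;
    if (av_file_map("cover.jpg", &buf, &size, 0, NULL) >= 0) {
        if (size > 0) {
            /* use buf[0 .. size-1] */
        }
        if (buf)
            av_file_unmap(buf, size);
    }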
The payload is + * an AVDynamicHDRPlus type and contains information for color + * volume transform - application 4 of SMPTE 2094-40:2016 standard. + */ + AV_FRAME_DATA_DYNAMIC_HDR_PLUS, + + /** + * Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of + * array element is implied by AVFrameSideData.size / AVRegionOfInterest.self_size. + */ + AV_FRAME_DATA_REGIONS_OF_INTEREST, }; enum AVActiveFormatDescription { @@ -168,6 +206,62 @@ typedef struct AVFrameSideData { AVBufferRef *buf; } AVFrameSideData; +/** + * Structure describing a single Region Of Interest. + * + * When multiple regions are defined in a single side-data block, they + * should be ordered from most to least important - some encoders are only + * capable of supporting a limited number of distinct regions, so will have + * to truncate the list. + * + * When overlapping regions are defined, the first region containing a given + * area of the frame applies. + */ +typedef struct AVRegionOfInterest { + /** + * Must be set to the size of this data structure (that is, + * sizeof(AVRegionOfInterest)). + */ + uint32_t self_size; + /** + * Distance in pixels from the top edge of the frame to the top and + * bottom edges and from the left edge of the frame to the left and + * right edges of the rectangle defining this region of interest. + * + * The constraints on a region are encoder dependent, so the region + * actually affected may be slightly larger for alignment or other + * reasons. + */ + int top; + int bottom; + int left; + int right; + /** + * Quantisation offset. + * + * Must be in the range -1 to +1. A value of zero indicates no quality + * change. A negative value asks for better quality (less quantisation), + * while a positive value asks for worse quality (greater quantisation). + * + * The range is calibrated so that the extreme values indicate the + * largest possible offset - if the rest of the frame is encoded with the + * worst possible quality, an offset of -1 indicates that this region + * should be encoded with the best possible quality anyway. Intermediate + * values are then interpolated in some codec-dependent way. + * + * For example, in 10-bit H.264 the quantisation parameter varies between + * -12 and 51. A typical qoffset value of -1/10 therefore indicates that + * this region should be encoded with a QP around one-tenth of the full + * range better than the rest of the frame. So, if most of the frame + * were to be encoded with a QP of around 30, this region would get a QP + * of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3). + * An extreme value of -1 would indicate that this region should be + * encoded with the best possible quality regardless of the treatment of + * the rest of the frame - that is, should be encoded at a QP of -12. + */ + AVRational qoffset; +} AVRegionOfInterest; + /** * This structure describes decoded (raw) audio or video data. * @@ -364,7 +458,6 @@ typedef struct AVFrame { * that time, * the decoder reorders values as needed and sets AVFrame.reordered_opaque * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque - * @deprecated in favor of pkt_pts */ int64_t reordered_opaque; @@ -497,6 +590,8 @@ typedef struct AVFrame { int decode_error_flags; #define FF_DECODE_ERROR_INVALID_BITSTREAM 1 #define FF_DECODE_ERROR_MISSING_REFERENCE 2 +#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4 +#define FF_DECODE_ERROR_DECODE_SLICES 8 /** * number of audio channels, only used for audio. 
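A minimal sketch of attaching the new AVRegionOfInterest side data to a frame before encoding; frame is assumed to be a valid AVFrame, and only ROI-capable encoders will honour it:

    AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_REGIONS_OF_INTEREST,
                                                 sizeof(AVRegionOfInterest));
    if (sd) {
        AVRegionOfInterest *roi = (AVRegionOfInterest *)sd->data;
        roi->self_size = sizeof(*roi);
        roi->top     = 0;
        roi->bottom  = frame->height / 2;
        roi->left    = 0;
        roi->right   = frame->width / 2;
        roi->qoffset = (AVRational){ -1, 10 };  /* ask for somewhat better quality here */
    }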
@@ -529,6 +624,7 @@ typedef struct AVFrame { attribute_deprecated int qscale_type; + attribute_deprecated AVBufferRef *qp_table_buf; #endif /** @@ -800,6 +896,22 @@ AVFrameSideData *av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size); +/** + * Add a new side data to a frame from an existing AVBufferRef + * + * @param frame a frame to which the side data should be added + * @param type the type of the added side data + * @param buf an AVBufferRef to add as side data. The ownership of + * the reference is transferred to the frame. + * + * @return newly added side data on success, NULL on error. On failure + * the frame is unchanged and the AVBufferRef remains owned by + * the caller. + */ +AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame, + enum AVFrameSideDataType type, + AVBufferRef *buf); + /** * @return a pointer to the side data of a given type on success, NULL if there * is no side data with such type in this frame. diff --git a/ThirdParty/ffmpeg/include/libavutil/hdr_dynamic_metadata.h b/ThirdParty/ffmpeg/include/libavutil/hdr_dynamic_metadata.h new file mode 100644 index 000000000..2d72de56a --- /dev/null +++ b/ThirdParty/ffmpeg/include/libavutil/hdr_dynamic_metadata.h @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2018 Mohammad Izadi + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HDR_DYNAMIC_METADATA_H +#define AVUTIL_HDR_DYNAMIC_METADATA_H + +#include "frame.h" +#include "rational.h" + +/** + * Option for overlapping elliptical pixel selectors in an image. + */ +enum AVHDRPlusOverlapProcessOption { + AV_HDR_PLUS_OVERLAP_PROCESS_WEIGHTED_AVERAGING = 0, + AV_HDR_PLUS_OVERLAP_PROCESS_LAYERING = 1, +}; + +/** + * Represents the percentile at a specific percentage in + * a distribution. + */ +typedef struct AVHDRPlusPercentile { + /** + * The percentage value corresponding to a specific percentile linearized + * RGB value in the processing window in the scene. The value shall be in + * the range of 0 to100, inclusive. + */ + uint8_t percentage; + + /** + * The linearized maxRGB value at a specific percentile in the processing + * window in the scene. The value shall be in the range of 0 to 1, inclusive + * and in multiples of 0.00001. + */ + AVRational percentile; +} AVHDRPlusPercentile; + +/** + * Color transform parameters at a processing window in a dynamic metadata for + * SMPTE 2094-40. + */ +typedef struct AVHDRPlusColorTransformParams { + /** + * The relative x coordinate of the top left pixel of the processing + * window. The value shall be in the range of 0 and 1, inclusive and + * in multiples of 1/(width of Picture - 1). The value 1 corresponds + * to the absolute coordinate of width of Picture - 1. The value for + * first processing window shall be 0. 
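A minimal sketch of av_frame_new_side_data_from_buf() declared above: on success the AVBufferRef is owned by the frame, on failure it stays with the caller. icc_data and icc_len are placeholders for an existing ICC profile blob:

    AVBufferRef *buf = av_buffer_alloc(icc_len);
    if (buf) {
        memcpy(buf->data, icc_data, icc_len);
        if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_ICC_PROFILE, buf))
            av_buffer_unref(&buf);   /* attach failed: release our own reference */
    }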
+ */ + AVRational window_upper_left_corner_x; + + /** + * The relative y coordinate of the top left pixel of the processing + * window. The value shall be in the range of 0 and 1, inclusive and + * in multiples of 1/(height of Picture - 1). The value 1 corresponds + * to the absolute coordinate of height of Picture - 1. The value for + * first processing window shall be 0. + */ + AVRational window_upper_left_corner_y; + + /** + * The relative x coordinate of the bottom right pixel of the processing + * window. The value shall be in the range of 0 and 1, inclusive and + * in multiples of 1/(width of Picture - 1). The value 1 corresponds + * to the absolute coordinate of width of Picture - 1. The value for + * first processing window shall be 1. + */ + AVRational window_lower_right_corner_x; + + /** + * The relative y coordinate of the bottom right pixel of the processing + * window. The value shall be in the range of 0 and 1, inclusive and + * in multiples of 1/(height of Picture - 1). The value 1 corresponds + * to the absolute coordinate of height of Picture - 1. The value for + * first processing window shall be 1. + */ + AVRational window_lower_right_corner_y; + + /** + * The x coordinate of the center position of the concentric internal and + * external ellipses of the elliptical pixel selector in the processing + * window. The value shall be in the range of 0 to (width of Picture - 1), + * inclusive and in multiples of 1 pixel. + */ + uint16_t center_of_ellipse_x; + + /** + * The y coordinate of the center position of the concentric internal and + * external ellipses of the elliptical pixel selector in the processing + * window. The value shall be in the range of 0 to (height of Picture - 1), + * inclusive and in multiples of 1 pixel. + */ + uint16_t center_of_ellipse_y; + + /** + * The clockwise rotation angle in degree of arc with respect to the + * positive direction of the x-axis of the concentric internal and external + * ellipses of the elliptical pixel selector in the processing window. The + * value shall be in the range of 0 to 180, inclusive and in multiples of 1. + */ + uint8_t rotation_angle; + + /** + * The semi-major axis value of the internal ellipse of the elliptical pixel + * selector in amount of pixels in the processing window. The value shall be + * in the range of 1 to 65535, inclusive and in multiples of 1 pixel. + */ + uint16_t semimajor_axis_internal_ellipse; + + /** + * The semi-major axis value of the external ellipse of the elliptical pixel + * selector in amount of pixels in the processing window. The value + * shall not be less than semimajor_axis_internal_ellipse of the current + * processing window. The value shall be in the range of 1 to 65535, + * inclusive and in multiples of 1 pixel. + */ + uint16_t semimajor_axis_external_ellipse; + + /** + * The semi-minor axis value of the external ellipse of the elliptical pixel + * selector in amount of pixels in the processing window. The value shall be + * in the range of 1 to 65535, inclusive and in multiples of 1 pixel. + */ + uint16_t semiminor_axis_external_ellipse; + + /** + * Overlap process option indicates one of the two methods of combining + * rendered pixels in the processing window in an image with at least one + * elliptical pixel selector. For overlapping elliptical pixel selectors + * in an image, overlap_process_option shall have the same value. 
+ */ + enum AVHDRPlusOverlapProcessOption overlap_process_option; + + /** + * The maximum of the color components of linearized RGB values in the + * processing window in the scene. The values should be in the range of 0 to + * 1, inclusive and in multiples of 0.00001. maxscl[ 0 ], maxscl[ 1 ], and + * maxscl[ 2 ] are corresponding to R, G, B color components respectively. + */ + AVRational maxscl[3]; + + /** + * The average of linearized maxRGB values in the processing window in the + * scene. The value should be in the range of 0 to 1, inclusive and in + * multiples of 0.00001. + */ + AVRational average_maxrgb; + + /** + * The number of linearized maxRGB values at given percentiles in the + * processing window in the scene. The maximum value shall be 15. + */ + uint8_t num_distribution_maxrgb_percentiles; + + /** + * The linearized maxRGB values at given percentiles in the + * processing window in the scene. + */ + AVHDRPlusPercentile distribution_maxrgb[15]; + + /** + * The fraction of selected pixels in the image that contains the brightest + * pixel in the scene. The value shall be in the range of 0 to 1, inclusive + * and in multiples of 0.001. + */ + AVRational fraction_bright_pixels; + + /** + * This flag indicates that the metadata for the tone mapping function in + * the processing window is present (for value of 1). + */ + uint8_t tone_mapping_flag; + + /** + * The x coordinate of the separation point between the linear part and the + * curved part of the tone mapping function. The value shall be in the range + * of 0 to 1, excluding 0 and in multiples of 1/4095. + */ + AVRational knee_point_x; + + /** + * The y coordinate of the separation point between the linear part and the + * curved part of the tone mapping function. The value shall be in the range + * of 0 to 1, excluding 0 and in multiples of 1/4095. + */ + AVRational knee_point_y; + + /** + * The number of the intermediate anchor parameters of the tone mapping + * function in the processing window. The maximum value shall be 15. + */ + uint8_t num_bezier_curve_anchors; + + /** + * The intermediate anchor parameters of the tone mapping function in the + * processing window in the scene. The values should be in the range of 0 + * to 1, inclusive and in multiples of 1/1023. + */ + AVRational bezier_curve_anchors[15]; + + /** + * This flag shall be equal to 0 in bitstreams conforming to this version of + * this Specification. Other values are reserved for future use. + */ + uint8_t color_saturation_mapping_flag; + + /** + * The color saturation gain in the processing window in the scene. The + * value shall be in the range of 0 to 63/8, inclusive and in multiples of + * 1/8. The default value shall be 1. + */ + AVRational color_saturation_weight; +} AVHDRPlusColorTransformParams; + +/** + * This struct represents dynamic metadata for color volume transform - + * application 4 of SMPTE 2094-40:2016 standard. + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with + * av_dynamic_hdr_plus_alloc() and its size is not a part of + * the public ABI. + */ +typedef struct AVDynamicHDRPlus { + /** + * Country code by Rec. ITU-T T.35 Annex A. The value shall be 0xB5. + */ + uint8_t itu_t_t35_country_code; + + /** + * Application version in the application defining document in ST-2094 + * suite. The value shall be set to 0. + */ + uint8_t application_version; + + /** + * The number of processing windows. 
The value shall be in the range + * of 1 to 3, inclusive. + */ + uint8_t num_windows; + + /** + * The color transform parameters for every processing window. + */ + AVHDRPlusColorTransformParams params[3]; + + /** + * The nominal maximum display luminance of the targeted system display, + * in units of 0.0001 candelas per square metre. The value shall be in + * the range of 0 to 10000, inclusive. + */ + AVRational targeted_system_display_maximum_luminance; + + /** + * This flag shall be equal to 0 in bit streams conforming to this version + * of this Specification. The value 1 is reserved for future use. + */ + uint8_t targeted_system_display_actual_peak_luminance_flag; + + /** + * The number of rows in the targeted system_display_actual_peak_luminance + * array. The value shall be in the range of 2 to 25, inclusive. + */ + uint8_t num_rows_targeted_system_display_actual_peak_luminance; + + /** + * The number of columns in the + * targeted_system_display_actual_peak_luminance array. The value shall be + * in the range of 2 to 25, inclusive. + */ + uint8_t num_cols_targeted_system_display_actual_peak_luminance; + + /** + * The normalized actual peak luminance of the targeted system display. The + * values should be in the range of 0 to 1, inclusive and in multiples of + * 1/15. + */ + AVRational targeted_system_display_actual_peak_luminance[25][25]; + + /** + * This flag shall be equal to 0 in bitstreams conforming to this version of + * this Specification. The value 1 is reserved for future use. + */ + uint8_t mastering_display_actual_peak_luminance_flag; + + /** + * The number of rows in the mastering_display_actual_peak_luminance array. + * The value shall be in the range of 2 to 25, inclusive. + */ + uint8_t num_rows_mastering_display_actual_peak_luminance; + + /** + * The number of columns in the mastering_display_actual_peak_luminance + * array. The value shall be in the range of 2 to 25, inclusive. + */ + uint8_t num_cols_mastering_display_actual_peak_luminance; + + /** + * The normalized actual peak luminance of the mastering display used for + * mastering the image essence. The values should be in the range of 0 to 1, + * inclusive and in multiples of 1/15. + */ + AVRational mastering_display_actual_peak_luminance[25][25]; +} AVDynamicHDRPlus; + +/** + * Allocate an AVDynamicHDRPlus structure and set its fields to + * default values. The resulting struct can be freed using av_freep(). + * + * @return An AVDynamicHDRPlus filled with default values or NULL + * on failure. + */ +AVDynamicHDRPlus *av_dynamic_hdr_plus_alloc(size_t *size); + +/** + * Allocate a complete AVDynamicHDRPlus and add it to the frame. + * @param frame The frame which side data is added to. + * + * @return The AVDynamicHDRPlus structure to be filled by caller or NULL + * on failure. 
+ */ +AVDynamicHDRPlus *av_dynamic_hdr_plus_create_side_data(AVFrame *frame); + +#endif /* AVUTIL_HDR_DYNAMIC_METADATA_H */ diff --git a/ThirdParty/ffmpeg/include/libavutil/hwcontext_cuda.h b/ThirdParty/ffmpeg/include/libavutil/hwcontext_cuda.h index 12dae8449..81a0552ca 100644 --- a/ThirdParty/ffmpeg/include/libavutil/hwcontext_cuda.h +++ b/ThirdParty/ffmpeg/include/libavutil/hwcontext_cuda.h @@ -41,6 +41,7 @@ typedef struct AVCUDADeviceContextInternal AVCUDADeviceContextInternal; */ typedef struct AVCUDADeviceContext { CUcontext cuda_ctx; + CUstream stream; AVCUDADeviceContextInternal *internal; } AVCUDADeviceContext; diff --git a/ThirdParty/ffmpeg/include/libavutil/hwcontext_drm.h b/ThirdParty/ffmpeg/include/libavutil/hwcontext_drm.h index 2e225451e..42709f215 100644 --- a/ThirdParty/ffmpeg/include/libavutil/hwcontext_drm.h +++ b/ThirdParty/ffmpeg/include/libavutil/hwcontext_drm.h @@ -58,6 +58,9 @@ typedef struct AVDRMObjectDescriptor { size_t size; /** * Format modifier applied to the object (DRM_FORMAT_MOD_*). + * + * If the format modifier is unknown then this should be set to + * DRM_FORMAT_MOD_INVALID. */ uint64_t format_modifier; } AVDRMObjectDescriptor; diff --git a/ThirdParty/ffmpeg/include/libavutil/hwcontext_videotoolbox.h b/ThirdParty/ffmpeg/include/libavutil/hwcontext_videotoolbox.h index 380918d92..5074d79e6 100644 --- a/ThirdParty/ffmpeg/include/libavutil/hwcontext_videotoolbox.h +++ b/ThirdParty/ffmpeg/include/libavutil/hwcontext_videotoolbox.h @@ -51,4 +51,10 @@ enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt); */ uint32_t av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt); +/** + * Same as av_map_videotoolbox_format_from_pixfmt function, but can map and + * return full range pixel formats via a flag. + */ +uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range); + #endif /* AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H */ diff --git a/ThirdParty/ffmpeg/include/libavutil/intreadwrite.h b/ThirdParty/ffmpeg/include/libavutil/intreadwrite.h index 67c763b13..4c8413a53 100644 --- a/ThirdParty/ffmpeg/include/libavutil/intreadwrite.h +++ b/ThirdParty/ffmpeg/include/libavutil/intreadwrite.h @@ -542,6 +542,21 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; # define AV_WN64A(p, v) AV_WNA(64, p, v) #endif +#if AV_HAVE_BIGENDIAN +# define AV_RLA(s, p) av_bswap##s(AV_RN##s##A(p)) +# define AV_WLA(s, p, v) AV_WN##s##A(p, av_bswap##s(v)) +#else +# define AV_RLA(s, p) AV_RN##s##A(p) +# define AV_WLA(s, p, v) AV_WN##s##A(p, v) +#endif + +#ifndef AV_RL64A +# define AV_RL64A(p) AV_RLA(64, p) +#endif +#ifndef AV_WL64A +# define AV_WL64A(p, v) AV_WLA(64, p, v) +#endif + /* * The AV_COPYxxU macros are suitable for copying data to/from unaligned * memory locations. diff --git a/ThirdParty/ffmpeg/include/libavutil/lzo.h b/ThirdParty/ffmpeg/include/libavutil/lzo.h deleted file mode 100644 index c03403992..000000000 --- a/ThirdParty/ffmpeg/include/libavutil/lzo.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * LZO 1x decompression - * copyright (c) 2006 Reimar Doeffinger - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
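The hdr_dynamic_metadata.h hunk above only declares the new HDR10+ structures and allocators, so a minimal sketch of how a caller might attach that side data to a frame may help. Everything beyond the declarations shown in the patch (the helper name and all field values) is illustrative, not part of the patch.

/* Illustrative sketch: attach HDR10+ dynamic metadata to a frame using the
 * declarations from the hdr_dynamic_metadata.h hunk above. All values are
 * placeholders chosen to stay inside the documented ranges. */
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/hdr_dynamic_metadata.h>

static int attach_example_hdr_plus(AVFrame *frame)
{
    AVDynamicHDRPlus *hdr = av_dynamic_hdr_plus_create_side_data(frame);
    if (!hdr)
        return AVERROR(ENOMEM);

    hdr->itu_t_t35_country_code = 0xB5;  /* fixed value per the header comment */
    hdr->application_version    = 0;
    hdr->num_windows            = 1;     /* a single processing window */

    /* Scene statistics for the first window, in multiples of 0.00001. */
    hdr->params[0].maxscl[0]      = (AVRational){ 2000, 100000 };
    hdr->params[0].maxscl[1]      = (AVRational){ 1500, 100000 };
    hdr->params[0].maxscl[2]      = (AVRational){ 1800, 100000 };
    hdr->params[0].average_maxrgb = (AVRational){ 1000, 100000 };

    /* Nominal peak of the targeted display; 400 is within the 0..10000 range. */
    hdr->targeted_system_display_maximum_luminance = (AVRational){ 400, 1 };
    return 0;
}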
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_LZO_H -#define AVUTIL_LZO_H - -/** - * @defgroup lavu_lzo LZO - * @ingroup lavu_crypto - * - * @{ - */ - -#include - -/** @name Error flags returned by av_lzo1x_decode - * @{ */ -/// end of the input buffer reached before decoding finished -#define AV_LZO_INPUT_DEPLETED 1 -/// decoded data did not fit into output buffer -#define AV_LZO_OUTPUT_FULL 2 -/// a reference to previously decoded data was wrong -#define AV_LZO_INVALID_BACKPTR 4 -/// a non-specific error in the compressed bitstream -#define AV_LZO_ERROR 8 -/** @} */ - -#define AV_LZO_INPUT_PADDING 8 -#define AV_LZO_OUTPUT_PADDING 12 - -/** - * @brief Decodes LZO 1x compressed data. - * @param out output buffer - * @param outlen size of output buffer, number of bytes left are returned here - * @param in input buffer - * @param inlen size of input buffer, number of bytes left are returned here - * @return 0 on success, otherwise a combination of the error flags above - * - * Make sure all buffers are appropriately padded, in must provide - * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. - */ -int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); - -/** - * @} - */ - -#endif /* AVUTIL_LZO_H */ diff --git a/ThirdParty/ffmpeg/include/libavutil/mem.h b/ThirdParty/ffmpeg/include/libavutil/mem.h index 7e0b12a8a..5fb1a02dd 100644 --- a/ThirdParty/ffmpeg/include/libavutil/mem.h +++ b/ThirdParty/ffmpeg/include/libavutil/mem.h @@ -339,7 +339,7 @@ av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size) * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be * correctly aligned. */ -av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); +int av_reallocp_array(void *ptr, size_t nmemb, size_t size); /** * Reallocate the given buffer if it is not large enough, otherwise do nothing. @@ -363,10 +363,10 @@ av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); * @endcode * * @param[in,out] ptr Already allocated buffer, or `NULL` - * @param[in,out] size Pointer to current size of buffer `ptr`. `*size` is - * changed to `min_size` in case of success or 0 in - * case of failure - * @param[in] min_size New size of buffer `ptr` + * @param[in,out] size Pointer to the size of buffer `ptr`. `*size` is + * updated to the new allocated size, in particular 0 + * in case of failure. + * @param[in] min_size Desired minimal size of buffer `ptr` * @return `ptr` if the buffer is large enough, a pointer to newly reallocated * buffer if the buffer was not large enough, or `NULL` in case of * error @@ -397,10 +397,10 @@ void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); * @param[in,out] ptr Pointer to pointer to an already allocated buffer. * `*ptr` will be overwritten with pointer to new * buffer on success or `NULL` on failure - * @param[in,out] size Pointer to current size of buffer `*ptr`. 
`*size` is - * changed to `min_size` in case of success or 0 in - * case of failure - * @param[in] min_size New size of buffer `*ptr` + * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is + * updated to the new allocated size, in particular 0 + * in case of failure. + * @param[in] min_size Desired minimal size of buffer `*ptr` * @see av_realloc() * @see av_fast_mallocz() */ @@ -418,10 +418,10 @@ void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); * @param[in,out] ptr Pointer to pointer to an already allocated buffer. * `*ptr` will be overwritten with pointer to new * buffer on success or `NULL` on failure - * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is - * changed to `min_size` in case of success or 0 in - * case of failure - * @param[in] min_size New size of buffer `*ptr` + * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is + * updated to the new allocated size, in particular 0 + * in case of failure. + * @param[in] min_size Desired minimal size of buffer `*ptr` * @see av_fast_malloc() */ void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); diff --git a/ThirdParty/ffmpeg/include/libavutil/old_pix_fmts.h b/ThirdParty/ffmpeg/include/libavutil/old_pix_fmts.h deleted file mode 100644 index cd1ed7c19..000000000 --- a/ThirdParty/ffmpeg/include/libavutil/old_pix_fmts.h +++ /dev/null @@ -1,177 +0,0 @@ -/* - * copyright (c) 2006-2012 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_OLD_PIX_FMTS_H -#define AVUTIL_OLD_PIX_FMTS_H - -/* - * This header exists to prevent new pixel formats from being accidentally added - * to the deprecated list. - * Do not include it directly. It will be removed on next major bump - * - * Do not add new items to this list. Use the AVPixelFormat enum instead. - */ - PIX_FMT_NONE = AV_PIX_FMT_NONE, - PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) - PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr - PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... - PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
- PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) - PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) - PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) - PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) - PIX_FMT_GRAY8, ///< Y , 8bpp - PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb - PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb - PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette - PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range - PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range - PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range -#if FF_API_XVMC - PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing - PIX_FMT_XVMC_MPEG2_IDCT, -#endif /* FF_API_XVMC */ - PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 - PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 - PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) - PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits - PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) - PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) - PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits - PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) - PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) - PIX_FMT_NV21, ///< as above, but U and V bytes are swapped - - PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... - PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... - PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... - PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
- - PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian - PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian - PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) - PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range - PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) -#if FF_API_VDPAU - PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers -#endif - PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian - PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian - - PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian - PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian - PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 - PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 - - PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian - PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian - PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 - PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 - - PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers - PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers - PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - - PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), 
little-endian - PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian -#if FF_API_VDPAU - PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers -#endif - PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer - - PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 - PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 - PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 - PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 - PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha - PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian - PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian - - //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus - //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately - //is better - PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_VDA_VLD, ///< hardware decoding through VDA - -#ifdef AV_PIX_FMT_ABI_GIT_MASTER - PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian - PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian -#endif - PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp - PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian - PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian - PIX_FMT_GBRP10BE, ///< planar GBR 
4:4:4 30bpp, big endian - PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian - PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian - PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian - -#ifndef AV_PIX_FMT_ABI_GIT_MASTER - PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian - PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian -#endif - PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... - PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... - PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... - PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... - PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) - PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) - - PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian - PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian - PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian - PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian - - PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions -#endif /* AVUTIL_OLD_PIX_FMTS_H */ diff --git a/ThirdParty/ffmpeg/include/libavutil/opt.h b/ThirdParty/ffmpeg/include/libavutil/opt.h index 391720f2e..bc98ab104 100644 --- a/ThirdParty/ffmpeg/include/libavutil/opt.h +++ b/ThirdParty/ffmpeg/include/libavutil/opt.h @@ -287,7 +287,10 @@ typedef struct AVOption { * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set. 
*/ #define AV_OPT_FLAG_READONLY 128 +#define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering +#define AV_OPT_FLAG_RUNTIME_PARAM (1<<15) ///< a generic parameter which can be set by the user at runtime #define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +#define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information //FIXME think about enc-audio, ... style flags /** diff --git a/ThirdParty/ffmpeg/include/libavutil/pixdesc.h b/ThirdParty/ffmpeg/include/libavutil/pixdesc.h index ea046033a..c055810ae 100644 --- a/ThirdParty/ffmpeg/include/libavutil/pixdesc.h +++ b/ThirdParty/ffmpeg/include/libavutil/pixdesc.h @@ -154,17 +154,21 @@ typedef struct AVPixFmtDescriptor { * in some cases be simpler. Or the data can be interpreted purely based on * the pixel format without using the palette. * An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8 + * + * @deprecated This flag is deprecated, and will be removed. When it is removed, + * the extra palette allocation in AVFrame.data[1] is removed as well. Only + * actual paletted formats (as indicated by AV_PIX_FMT_FLAG_PAL) will have a + * palette. Starting with FFmpeg versions which have this flag deprecated, the + * extra "pseudo" palette is already ignored, and API users are not required to + * allocate a palette for AV_PIX_FMT_FLAG_PSEUDOPAL formats (it was required + * before the deprecation, though). */ #define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) /** * The pixel format has an alpha channel. This is set on all formats that - * support alpha in some way. The exception is AV_PIX_FMT_PAL8, which can - * carry alpha as part of the palette. Details are explained in the - * AVPixelFormat enum, and are also encoded in the corresponding - * AVPixFmtDescriptor. - * - * The alpha is always straight, never pre-multiplied. + * support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always + * straight, never pre-multiplied. * * If a codec or a filter does not support alpha, it should set all alpha to * opaque, or use the equivalent pixel formats without alpha component, e.g. @@ -339,7 +343,13 @@ char *av_get_pix_fmt_string(char *buf, int buf_size, * format writes the values corresponding to the palette * component c in data[1] to dst, rather than the palette indexes in * data[0]. The behavior is undefined if the format is not paletted. 
+ * @param dst_element_size size of elements in dst array (2 or 4 byte) */ +void av_read_image_line2(void *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component, + int dst_element_size); + void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4], const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component); @@ -357,7 +367,12 @@ void av_read_image_line(uint16_t *dst, const uint8_t *data[4], * @param y the vertical coordinate of the first pixel to write * @param w the width of the line to write, that is the number of * values to write to the image line + * @param src_element_size size of elements in src array (2 or 4 byte) */ +void av_write_image_line2(const void *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int src_element_size); + void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4], const AVPixFmtDescriptor *desc, int x, int y, int c, int w); diff --git a/ThirdParty/ffmpeg/include/libavutil/pixfmt.h b/ThirdParty/ffmpeg/include/libavutil/pixfmt.h index e184a5667..d78e863d4 100644 --- a/ThirdParty/ffmpeg/include/libavutil/pixfmt.h +++ b/ThirdParty/ffmpeg/include/libavutil/pixfmt.h @@ -42,6 +42,10 @@ * This is stored as BGRA on little-endian CPU architectures and ARGB on * big-endian CPUs. * + * @note + * If the resolution is not a multiple of the chroma subsampling factor + * then the chroma plane resolution must be rounded up. + * * @par * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized * image data is stored in AVFrame.data[0]. The palette is transported in @@ -330,6 +334,20 @@ enum AVPixelFormat { */ AV_PIX_FMT_OPENCL, + AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian + AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian + + AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian + AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian + + AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian + AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian + AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian + AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian + + AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped + AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions }; @@ -349,6 +367,7 @@ enum AVPixelFormat { #define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE) #define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE) #define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE) +#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE) #define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) #define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE) #define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) @@ -397,12 +416,16 @@ enum AVPixelFormat { #define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE) #define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE) 
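The new av_read_image_line2()/av_write_image_line2() declared above differ from the older functions only in taking an explicit element size, presumably so components wider than 16 bits (such as the new GRAYF32 formats) can pass through untruncated. A call-shape sketch follows; the helper name and the fixed-size buffer are assumptions for illustration.

/* Sketch: read the first line of component 0 into 32-bit elements using the
 * wider variant declared above. Assumes `frame` already holds decoded image
 * data; error handling omitted. */
#include <stdint.h>
#include <libavutil/frame.h>
#include <libavutil/pixdesc.h>

static void read_first_line_wide(const AVFrame *frame)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    uint32_t line[4096];
    int w = frame->width < 4096 ? frame->width : 4096;

    av_read_image_line2(line,
                        (const uint8_t **)frame->data, frame->linesize, desc,
                        0, 0, 0, w,  /* x, y, component index, width */
                        0,           /* read_pal_component: off */
                        4);          /* destination elements are 4 bytes wide */
}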
+#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE) + #define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) #define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) #define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) #define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) #define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) #define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA422P12 AV_PIX_FMT_NE(YUVA422P12BE, YUVA422P12LE) +#define AV_PIX_FMT_YUVA444P12 AV_PIX_FMT_NE(YUVA444P12BE, YUVA444P12LE) #define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) #define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) #define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) @@ -433,7 +456,8 @@ enum AVColorPrimaries { AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 - AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E / JEDEC P22 phosphors + AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213, AVCOL_PRI_NB ///< Not part of ABI }; diff --git a/ThirdParty/ffmpeg/include/libavutil/threadmessage.h b/ThirdParty/ffmpeg/include/libavutil/threadmessage.h index 8480a0a3d..42ce655f3 100644 --- a/ThirdParty/ffmpeg/include/libavutil/threadmessage.h +++ b/ThirdParty/ffmpeg/include/libavutil/threadmessage.h @@ -95,6 +95,14 @@ void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, void av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq, void (*free_func)(void *msg)); +/** + * Return the current number of messages in the queue. + * + * @return the current number of messages or AVERROR(ENOSYS) if lavu was built + * without thread support + */ +int av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq); + /** * Flush the message queue * diff --git a/ThirdParty/ffmpeg/include/libavutil/tx.h b/ThirdParty/ffmpeg/include/libavutil/tx.h new file mode 100644 index 000000000..d6cdfdf9f --- /dev/null +++ b/ThirdParty/ffmpeg/include/libavutil/tx.h @@ -0,0 +1,93 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TX_H +#define AVUTIL_TX_H + +#include +#include + +typedef struct AVTXContext AVTXContext; + +typedef struct AVComplexFloat { + float re, im; +} AVComplexFloat; + +typedef struct AVComplexDouble { + double re, im; +} AVComplexDouble; + +enum AVTXType { + /** + * Standard complex to complex FFT with sample data type AVComplexFloat. + * Scaling currently unsupported + */ + AV_TX_FLOAT_FFT = 0, + /** + * Standard MDCT with sample data type of float and a scale type of + * float. 
Length is the frame size, not the window size (which is 2x frame) + */ + AV_TX_FLOAT_MDCT = 1, + /** + * Same as AV_TX_FLOAT_FFT with a data type of AVComplexDouble. + */ + AV_TX_DOUBLE_FFT = 2, + /** + * Same as AV_TX_FLOAT_MDCT with data and scale type of double. + */ + AV_TX_DOUBLE_MDCT = 3, +}; + +/** + * Function pointer to a function to perform the transform. + * + * @note Using a different context than the one allocated during av_tx_init() + * is not allowed. + * + * @param s the transform context + * @param out the output array + * @param in the input array + * @param stride the input or output stride (depending on transform direction) + * in bytes, currently implemented for all MDCT transforms + */ +typedef void (*av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride); + +/** + * Initialize a transform context with the given configuration + * Currently power of two lengths from 4 to 131072 are supported, along with + * any length decomposable to a power of two and either 3, 5 or 15. + * + * @param ctx the context to allocate, will be NULL on error + * @param tx pointer to the transform function pointer to set + * @param type type the type of transform + * @param inv whether to do an inverse or a forward transform + * @param len the size of the transform in samples + * @param scale pointer to the value to scale the output if supported by type + * @param flags currently unused + * + * @return 0 on success, negative error code on failure + */ +int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, + int inv, int len, const void *scale, uint64_t flags); + +/** + * Frees a context and sets ctx to NULL, does nothing when ctx == NULL + */ +void av_tx_uninit(AVTXContext **ctx); + +#endif /* AVUTIL_TX_H */ diff --git a/ThirdParty/ffmpeg/include/libavutil/version.h b/ThirdParty/ffmpeg/include/libavutil/version.h index a2a820aeb..27d663baf 100644 --- a/ThirdParty/ffmpeg/include/libavutil/version.h +++ b/ThirdParty/ffmpeg/include/libavutil/version.h @@ -79,7 +79,7 @@ */ #define LIBAVUTIL_VERSION_MAJOR 56 -#define LIBAVUTIL_VERSION_MINOR 7 +#define LIBAVUTIL_VERSION_MINOR 35 #define LIBAVUTIL_VERSION_MICRO 101 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ @@ -126,6 +126,9 @@ #ifndef FF_API_FRAME_GET_SET #define FF_API_FRAME_GET_SET (LIBAVUTIL_VERSION_MAJOR < 57) #endif +#ifndef FF_API_PSEUDOPAL +#define FF_API_PSEUDOPAL (LIBAVUTIL_VERSION_MAJOR < 57) +#endif /** diff --git a/ThirdParty/ffmpeg/include/libswresample/version.h b/ThirdParty/ffmpeg/include/libswresample/version.h index 2640b1067..c07943416 100644 --- a/ThirdParty/ffmpeg/include/libswresample/version.h +++ b/ThirdParty/ffmpeg/include/libswresample/version.h @@ -29,8 +29,8 @@ #include "libavutil/avutil.h" #define LIBSWRESAMPLE_VERSION_MAJOR 3 -#define LIBSWRESAMPLE_VERSION_MINOR 0 -#define LIBSWRESAMPLE_VERSION_MICRO 101 +#define LIBSWRESAMPLE_VERSION_MINOR 6 +#define LIBSWRESAMPLE_VERSION_MICRO 100 #define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ LIBSWRESAMPLE_VERSION_MINOR, \ diff --git a/ThirdParty/ffmpeg/lib/libavcodec.a b/ThirdParty/ffmpeg/lib/libavcodec.a index d45a70cbb..28e2c84a2 100644 Binary files a/ThirdParty/ffmpeg/lib/libavcodec.a and b/ThirdParty/ffmpeg/lib/libavcodec.a differ diff --git a/ThirdParty/ffmpeg/lib/libavformat.a b/ThirdParty/ffmpeg/lib/libavformat.a index bcf04e9e0..963de8141 100644 Binary files a/ThirdParty/ffmpeg/lib/libavformat.a and b/ThirdParty/ffmpeg/lib/libavformat.a differ diff --git a/ThirdParty/ffmpeg/lib/libavutil.a 
b/ThirdParty/ffmpeg/lib/libavutil.a index 6ce306f2a..a1f51232b 100644 Binary files a/ThirdParty/ffmpeg/lib/libavutil.a and b/ThirdParty/ffmpeg/lib/libavutil.a differ diff --git a/ThirdParty/ffmpeg/lib/libopus.a b/ThirdParty/ffmpeg/lib/libopus.a new file mode 100644 index 000000000..2e80c7b8f Binary files /dev/null and b/ThirdParty/ffmpeg/lib/libopus.a differ diff --git a/ThirdParty/ffmpeg/lib/libswresample.a b/ThirdParty/ffmpeg/lib/libswresample.a index 529f711c6..f528d3980 100644 Binary files a/ThirdParty/ffmpeg/lib/libswresample.a and b/ThirdParty/ffmpeg/lib/libswresample.a differ diff --git a/ThirdParty/ffmpeg/lib/pkgconfig/libavcodec.pc b/ThirdParty/ffmpeg/lib/pkgconfig/libavcodec.pc index 9681afb65..e1758a595 100644 --- a/ThirdParty/ffmpeg/lib/pkgconfig/libavcodec.pc +++ b/ThirdParty/ffmpeg/lib/pkgconfig/libavcodec.pc @@ -5,10 +5,10 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include Name: libavcodec Description: FFmpeg codec library -Version: 58.11.101 -Requires: libswresample >= 3.0.101, libavutil >= 56.7.101 +Version: 58.59.102 +Requires: libavutil >= 56.35.101 Requires.private: Conflicts: -Libs: -L${libdir} -lavcodec -pthread -lm +Libs: -L${libdir} -lavcodec -pthread -liconv -lm -L/usr/local/Cellar/opus/1.3.1/lib -lopus Libs.private: Cflags: -I${includedir} diff --git a/ThirdParty/ffmpeg/lib/pkgconfig/libavformat.pc b/ThirdParty/ffmpeg/lib/pkgconfig/libavformat.pc index 3d4af2ad8..6c8aad315 100644 --- a/ThirdParty/ffmpeg/lib/pkgconfig/libavformat.pc +++ b/ThirdParty/ffmpeg/lib/pkgconfig/libavformat.pc @@ -5,8 +5,8 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include Name: libavformat Description: FFmpeg container format library -Version: 58.9.100 -Requires: libavcodec >= 58.11.101, libswresample >= 3.0.101, libavutil >= 56.7.101 +Version: 58.33.100 +Requires: libavcodec >= 58.59.102, libavutil >= 56.35.101 Requires.private: Conflicts: Libs: -L${libdir} -lavformat -lm -lz diff --git a/ThirdParty/ffmpeg/lib/pkgconfig/libavutil.pc b/ThirdParty/ffmpeg/lib/pkgconfig/libavutil.pc index 56b9ae9a2..5bcb1bfe9 100644 --- a/ThirdParty/ffmpeg/lib/pkgconfig/libavutil.pc +++ b/ThirdParty/ffmpeg/lib/pkgconfig/libavutil.pc @@ -5,7 +5,7 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include Name: libavutil Description: FFmpeg utility library -Version: 56.7.101 +Version: 56.35.101 Requires: Requires.private: Conflicts: diff --git a/ThirdParty/ffmpeg/lib/pkgconfig/libswresample.pc b/ThirdParty/ffmpeg/lib/pkgconfig/libswresample.pc index 3c5ed7d3c..d61751246 100644 --- a/ThirdParty/ffmpeg/lib/pkgconfig/libswresample.pc +++ b/ThirdParty/ffmpeg/lib/pkgconfig/libswresample.pc @@ -5,8 +5,8 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include Name: libswresample Description: FFmpeg audio resampling library -Version: 3.0.101 -Requires: libavutil >= 56.7.101 +Version: 3.6.100 +Requires: libavutil >= 56.35.101 Requires.private: Conflicts: Libs: -L${libdir} -lswresample -lm diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/avio_reading.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/avio_reading.c index cbfeb174b..36ee02afa 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/avio_reading.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/avio_reading.c @@ -117,11 +117,12 @@ int main(int argc, char *argv[]) end: avformat_close_input(&fmt_ctx); + /* note: the internal buffer could have changed, and be != avio_ctx_buffer */ - if (avio_ctx) { + if (avio_ctx) av_freep(&avio_ctx->buffer); - av_freep(&avio_ctx); - } + avio_context_free(&avio_ctx); + 
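Looking back at the new libavutil/tx.h header introduced a few hunks above, a forward complex FFT under the constraints that header states (power-of-two length, AVComplexFloat samples) could look roughly like the sketch below. The helper name, length and impulse input are illustrative assumptions, not part of the patch.

/* Sketch: one forward complex FFT with the new avutil tx API from the tx.h
 * hunk above. */
#include <libavutil/mem.h>
#include <libavutil/tx.h>

static int run_fft_once(void)
{
    AVTXContext *ctx = NULL;
    av_tx_fn fft = NULL;
    const int len = 1024;               /* power of two within the supported range */
    float scale = 1.0f;                 /* plain FFTs are documented as unscaled */
    AVComplexFloat *in  = av_mallocz(len * sizeof(*in));
    AVComplexFloat *out = av_malloc(len * sizeof(*out));
    int ret = av_tx_init(&ctx, &fft, AV_TX_FLOAT_FFT, 0 /* forward */, len, &scale, 0);

    if (ret >= 0 && in && out) {
        in[0].re = 1.0f;                           /* unit impulse -> flat spectrum */
        fft(ctx, out, in, sizeof(AVComplexFloat)); /* stride is in bytes */
    }

    av_tx_uninit(&ctx);
    av_free(in);
    av_free(out);
    return ret;
}

av_tx_uninit() is documented above as doing nothing when the context is already NULL, so the cleanup path needs no extra guards.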
av_file_unmap(buffer, buffer_size); if (ret < 0) { diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_audio.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_audio.c index 19dcafd2c..6c2a8ed55 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_audio.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_audio.c @@ -39,6 +39,35 @@ #define AUDIO_INBUF_SIZE 20480 #define AUDIO_REFILL_THRESH 4096 +static int get_format_from_sample_fmt(const char **fmt, + enum AVSampleFormat sample_fmt) +{ + int i; + struct sample_fmt_entry { + enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le; + } sample_fmt_entries[] = { + { AV_SAMPLE_FMT_U8, "u8", "u8" }, + { AV_SAMPLE_FMT_S16, "s16be", "s16le" }, + { AV_SAMPLE_FMT_S32, "s32be", "s32le" }, + { AV_SAMPLE_FMT_FLT, "f32be", "f32le" }, + { AV_SAMPLE_FMT_DBL, "f64be", "f64le" }, + }; + *fmt = NULL; + + for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) { + struct sample_fmt_entry *entry = &sample_fmt_entries[i]; + if (sample_fmt == entry->sample_fmt) { + *fmt = AV_NE(entry->fmt_be, entry->fmt_le); + return 0; + } + } + + fprintf(stderr, + "sample format %s is not supported as output format\n", + av_get_sample_fmt_name(sample_fmt)); + return -1; +} + static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile) { @@ -86,6 +115,9 @@ int main(int argc, char **argv) size_t data_size; AVPacket *pkt; AVFrame *decoded_frame = NULL; + enum AVSampleFormat sfmt; + int n_channels = 0; + const char *fmt; if (argc <= 2) { fprintf(stderr, "Usage: %s \n", argv[0]); @@ -172,6 +204,26 @@ int main(int argc, char **argv) pkt->size = 0; decode(c, pkt, decoded_frame, outfile); + /* print output pcm infomations, because there have no metadata of pcm */ + sfmt = c->sample_fmt; + + if (av_sample_fmt_is_planar(sfmt)) { + const char *packed = av_get_sample_fmt_name(sfmt); + printf("Warning: the sample format the decoder produced is planar " + "(%s). This example will output the first channel only.\n", + packed ? 
packed : "?"); + sfmt = av_get_packed_sample_fmt(sfmt); + } + + n_channels = c->channels; + if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0) + goto end; + + printf("Play the output audio file with the command:\n" + "ffplay -f %s -ac %d -ar %d %s\n", + fmt, n_channels, c->sample_rate, + outfilename); +end: fclose(outfile); fclose(f); diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_video.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_video.c index 5a9d43f68..169188a4b 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_video.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/decode_video.c @@ -95,7 +95,8 @@ int main(int argc, char **argv) AVPacket *pkt; if (argc <= 2) { - fprintf(stderr, "Usage: %s \n", argv[0]); + fprintf(stderr, "Usage: %s \n" + "And check your input file is encoded by mpeg1video please.\n", argv[0]); exit(0); } filename = argv[1]; diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/encode_video.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/encode_video.c index 6731b2ad1..d9ab40990 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/encode_video.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/encode_video.c @@ -186,7 +186,8 @@ int main(int argc, char **argv) encode(c, NULL, pkt, f); /* add sequence end code to have a real MPEG file */ - fwrite(endcode, 1, sizeof(endcode), f); + if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO) + fwrite(endcode, 1, sizeof(endcode), f); fclose(f); avcodec_free_context(&c); diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/filter_audio.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/filter_audio.c index 7467c21c3..1611e3d95 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/filter_audio.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/filter_audio.c @@ -289,8 +289,6 @@ int main(int argc, char *argv[]) return 1; } - avfilter_register_all(); - /* Allocate the frame we will be using to store the data. 
*/ frame = av_frame_alloc(); if (!frame) { diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_audio.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_audio.c index 73a00e814..834b137cd 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_audio.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_audio.c @@ -74,7 +74,6 @@ static int open_input_file(const char *filename) if (!dec_ctx) return AVERROR(ENOMEM); avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar); - av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0); /* init the audio decoder */ if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { @@ -228,8 +227,6 @@ int main(int argc, char **argv) exit(1); } - avfilter_register_all(); - if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_video.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_video.c index 01d664462..105a200d9 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_video.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/filtering_video.c @@ -29,6 +29,8 @@ #define _XOPEN_SOURCE 600 /* for usleep */ #include +#include +#include #include #include @@ -77,7 +79,6 @@ static int open_input_file(const char *filename) if (!dec_ctx) return AVERROR(ENOMEM); avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar); - av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0); /* init the video decoder */ if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { @@ -210,19 +211,20 @@ int main(int argc, char **argv) { int ret; AVPacket packet; - AVFrame *frame = av_frame_alloc(); - AVFrame *filt_frame = av_frame_alloc(); + AVFrame *frame; + AVFrame *filt_frame; - if (!frame || !filt_frame) { - perror("Could not allocate frame"); - exit(1); - } if (argc != 2) { fprintf(stderr, "Usage: %s file\n", argv[0]); exit(1); } - avfilter_register_all(); + frame = av_frame_alloc(); + filt_frame = av_frame_alloc(); + if (!frame || !filt_frame) { + perror("Could not allocate frame"); + exit(1); + } if ((ret = open_input_file(argv[1])) < 0) goto end; @@ -250,27 +252,25 @@ int main(int argc, char **argv) goto end; } - if (ret >= 0) { - frame->pts = frame->best_effort_timestamp; + frame->pts = frame->best_effort_timestamp; - /* push the decoded frame into the filtergraph */ - if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { - av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); - break; - } - - /* pull filtered frames from the filtergraph */ - while (1) { - ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); - if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) - break; - if (ret < 0) - goto end; - display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base); - av_frame_unref(filt_frame); - } - av_frame_unref(frame); + /* push the decoded frame into the filtergraph */ + if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { + av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); + break; } + + /* pull filtered frames from the filtergraph */ + while (1) { + ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + break; + if (ret < 0) + goto end; + display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base); + av_frame_unref(filt_frame); + } + av_frame_unref(frame); } } av_packet_unref(&packet); diff --git 
a/ThirdParty/ffmpeg/share/ffmpeg/examples/hw_decode.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/hw_decode.c index 14fe08b37..f3286f472 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/hw_decode.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/hw_decode.c @@ -4,21 +4,23 @@ * * HW Acceleration API (video decoding) decode sample * - * This file is part of FFmpeg. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
*/ /** @@ -86,7 +88,7 @@ static int decode_write(AVCodecContext *avctx, AVPacket *packet) return ret; } - while (ret >= 0) { + while (1) { if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) { fprintf(stderr, "Can not alloc frame\n"); ret = AVERROR(ENOMEM); @@ -138,13 +140,10 @@ static int decode_write(AVCodecContext *avctx, AVPacket *packet) fail: av_frame_free(&frame); av_frame_free(&sw_frame); - if (buffer) - av_freep(&buffer); + av_freep(&buffer); if (ret < 0) return ret; } - - return 0; } int main(int argc, char *argv[]) @@ -214,7 +213,6 @@ int main(int argc, char *argv[]) return -1; decoder_ctx->get_format = get_hw_format; - av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0); if (hw_decoder_init(decoder_ctx, type) < 0) return -1; diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/metadata.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/metadata.c index e330d077a..b6cfa6bd3 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/metadata.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/metadata.c @@ -47,6 +47,11 @@ int main (int argc, char **argv) if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL))) return ret; + if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n"); + return ret; + } + while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) printf("%s=%s\n", tag->key, tag->value); diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/muxing.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/muxing.c index 08da98e57..9af9aae48 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/muxing.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/muxing.c @@ -285,7 +285,7 @@ static AVFrame *get_audio_frame(OutputStream *ost) /* check if we want to generate more frames */ if (av_compare_ts(ost->next_pts, ost->enc->time_base, - STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) + STREAM_DURATION, (AVRational){ 1, 1 }) > 0) return NULL; for (j = 0; j nb_samples; j++) { @@ -464,7 +464,7 @@ static AVFrame *get_video_frame(OutputStream *ost) /* check if we want to generate more frames */ if (av_compare_ts(ost->next_pts, c->time_base, - STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) + STREAM_DURATION, (AVRational){ 1, 1 }) > 0) return NULL; /* when we pass a frame to the encoder, it may keep a reference to it diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/transcode_aac.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/transcode_aac.c index b19349573..e0c76f5b3 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/transcode_aac.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/transcode_aac.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017 Andreas Unterweger + * Copyright (c) 2013-2018 Andreas Unterweger * * This file is part of FFmpeg. * @@ -387,24 +387,39 @@ static int decode_audio_frame(AVFrame *frame, } } - /* Decode the audio frame stored in the temporary packet. - * The input audio stream decoder is used to do this. - * If we are at the end of the file, pass an empty packet to the decoder - * to flush it. */ - if ((error = avcodec_decode_audio4(input_codec_context, frame, - data_present, &input_packet)) < 0) { - fprintf(stderr, "Could not decode frame (error '%s')\n", + /* Send the audio frame stored in the temporary packet to the decoder. + * The input audio stream decoder is used to do this. 
*/ + if ((error = avcodec_send_packet(input_codec_context, &input_packet)) < 0) { + fprintf(stderr, "Could not send packet for decoding (error '%s')\n", av_err2str(error)); - av_packet_unref(&input_packet); return error; } - /* If the decoder has not been flushed completely, we are not finished, - * so that this function has to be called again. */ - if (*finished && *data_present) - *finished = 0; + /* Receive one frame from the decoder. */ + error = avcodec_receive_frame(input_codec_context, frame); + /* If the decoder asks for more data to be able to decode a frame, + * return indicating that no data is present. */ + if (error == AVERROR(EAGAIN)) { + error = 0; + goto cleanup; + /* If the end of the input file is reached, stop decoding. */ + } else if (error == AVERROR_EOF) { + *finished = 1; + error = 0; + goto cleanup; + } else if (error < 0) { + fprintf(stderr, "Could not decode frame (error '%s')\n", + av_err2str(error)); + goto cleanup; + /* Default case: Return decoded data. */ + } else { + *data_present = 1; + goto cleanup; + } + +cleanup: av_packet_unref(&input_packet); - return 0; + return error; } /** @@ -538,7 +553,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo, AVFrame *input_frame = NULL; /* Temporary storage for the converted input samples. */ uint8_t **converted_input_samples = NULL; - int data_present; + int data_present = 0; int ret = AVERROR_EXIT; /* Initialize temporary storage for one input frame. */ @@ -551,7 +566,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo, /* If we are at the end of the file and there are no more samples * in the decoder which are delayed, we are actually finished. * This must not be treated as an error. */ - if (*finished && !data_present) { + if (*finished) { ret = 0; goto cleanup; } @@ -637,7 +652,7 @@ static int64_t pts = 0; * @param output_format_context Format context of the output file * @param output_codec_context Codec context of the output file * @param[out] data_present Indicates whether data has been - * decoded + * encoded * @return Error code (0 if successful) */ static int encode_audio_frame(AVFrame *frame, @@ -656,29 +671,50 @@ static int encode_audio_frame(AVFrame *frame, pts += frame->nb_samples; } - /* Encode the audio frame and store it in the temporary packet. + /* Send the audio frame stored in the temporary packet to the encoder. * The output audio stream encoder is used to do this. */ - if ((error = avcodec_encode_audio2(output_codec_context, &output_packet, - frame, data_present)) < 0) { - fprintf(stderr, "Could not encode frame (error '%s')\n", + error = avcodec_send_frame(output_codec_context, frame); + /* The encoder signals that it has nothing more to encode. */ + if (error == AVERROR_EOF) { + error = 0; + goto cleanup; + } else if (error < 0) { + fprintf(stderr, "Could not send packet for encoding (error '%s')\n", av_err2str(error)); - av_packet_unref(&output_packet); return error; } - /* Write one audio frame from the temporary packet to the output file. */ - if (*data_present) { - if ((error = av_write_frame(output_format_context, &output_packet)) < 0) { - fprintf(stderr, "Could not write frame (error '%s')\n", - av_err2str(error)); - av_packet_unref(&output_packet); - return error; - } - - av_packet_unref(&output_packet); + /* Receive one encoded frame from the encoder. */ + error = avcodec_receive_packet(output_codec_context, &output_packet); + /* If the encoder asks for more data to be able to provide an + * encoded frame, return indicating that no data is present. 
*/ + if (error == AVERROR(EAGAIN)) { + error = 0; + goto cleanup; + /* If the last frame has been encoded, stop encoding. */ + } else if (error == AVERROR_EOF) { + error = 0; + goto cleanup; + } else if (error < 0) { + fprintf(stderr, "Could not encode frame (error '%s')\n", + av_err2str(error)); + goto cleanup; + /* Default case: Return encoded data. */ + } else { + *data_present = 1; } - return 0; + /* Write one audio frame from the temporary packet to the output file. */ + if (*data_present && + (error = av_write_frame(output_format_context, &output_packet)) < 0) { + fprintf(stderr, "Could not write frame (error '%s')\n", + av_err2str(error)); + goto cleanup; + } + +cleanup: + av_packet_unref(&output_packet); + return error; } /** @@ -816,6 +852,7 @@ int main(int argc, char **argv) int data_written; /* Flush the encoder as it may have delayed frames. */ do { + data_written = 0; if (encode_audio_frame(NULL, output_format_context, output_codec_context, &data_written)) goto cleanup; diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/transcoding.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/transcoding.c index ed1fd6411..e48837cbd 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/transcoding.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/transcoding.c @@ -172,6 +172,9 @@ static int open_output_file(const char *filename) enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate}; } + if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) + enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + /* Third parameter can be used to pass settings to encoder */ ret = avcodec_open2(enc_ctx, encoder, NULL); if (ret < 0) { @@ -183,8 +186,6 @@ static int open_output_file(const char *filename) av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i); return ret; } - if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) - enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; out_stream->time_base = enc_ctx->time_base; stream_ctx[i].enc_ctx = enc_ctx; @@ -517,8 +518,6 @@ int main(int argc, char **argv) return 1; } - avfilter_register_all(); - if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = open_output_file(argv[2])) < 0) diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_encode.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_encode.c index 3bdc62bef..98fd5d3b5 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_encode.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_encode.c @@ -1,21 +1,23 @@ /* * Video Acceleration API (video encoding) encode sample * - * This file is part of FFmpeg. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
* - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. */ /** diff --git a/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_transcode.c b/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_transcode.c index 649f48b97..279d20f63 100644 --- a/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_transcode.c +++ b/ThirdParty/ffmpeg/share/ffmpeg/examples/vaapi_transcode.c @@ -1,21 +1,23 @@ /* * Video Acceleration API (video transcoding) transcode sample * - * This file is part of FFmpeg. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. */ /** @@ -177,7 +179,7 @@ static int dec_enc(AVPacket *pkt, AVCodec *enc_codec) } /* set AVCodecContext Parameters for encoder, here we keep them stay * the same as decoder. - * xxx: now the the sample can't handle resolution change case. + * xxx: now the sample can't handle resolution change case. 
*/ encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate); encoder_ctx->pix_fmt = AV_PIX_FMT_VAAPI; diff --git a/Xcode-config/Shared.xcconfig b/Xcode-config/Shared.xcconfig new file mode 100644 index 000000000..37d0deebe --- /dev/null +++ b/Xcode-config/Shared.xcconfig @@ -0,0 +1,38 @@ +#include "DEVELOPMENT_TEAM.xcconfig" + +// Create the file DEVELOPMENT_TEAM.xcconfig +// in the "Xcode-config" directory within the project directory +// with the following build setting: +// DEVELOPMENT_TEAM = [Your TeamID] + +// Hint: recent Xcode versions appear to automatically create an empty file +// for you on the first build. This build will fail, of course, +// because code-signing can’t work without the DEVELOPMENT_TEAM set. +// Just fill it in and everything should work. + +// You can find your team ID by logging into your Apple Developer account +// and going to +// https://developer.apple.com/account/#/membership +// It should be listed under “Team ID”. + +// To set this system up for your own project, +// copy the "Xcode-config" directory there, +// add it to your Xcode project, +// navigate to your project settings +// (root icon in the Xcode Project Navigator) +// click on the project icon there, +// click on the “Info” tab +// under “Configurations” +// open the “Debug”, “Release”, +// and any other build configurations you might have. +// There you can set the pull-down menus in the +// “Based on Configuration File” column to “Shared”. +// Done. + +// Don’t forget to add the DEVELOPMENT_TEAM.xcconfig file to your .gitignore: +// # User-specific xcconfig files +// Xcode-config/DEVELOPMENT_TEAM.xcconfig + +// You can now remove the “DevelopmentTeam = AB1234C5DE;” entries from the +// .xcodeproj/project.pbxproj if you want to.
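For reference, a filled-in DEVELOPMENT_TEAM.xcconfig holds nothing but the single build setting described in the comments above. A minimal sketch, reusing the placeholder team ID from those comments (substitute your own Team ID):

    // Xcode-config/DEVELOPMENT_TEAM.xcconfig (user-specific, kept out of version control)
    DEVELOPMENT_TEAM = AB1234C5DE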