Compare commits
41 Commits
Author | SHA1 | Date |
---|---|---|
mrbesen | 8d36418bbe | |
mrbesen | c8e9ee7319 | |
mrbesen | a202604c32 | |
mrbesen | a96b3a1a9c | |
mrbesen | 7af344b2a4 | |
mrbesen | 8b7dd33a3b | |
mrbesen | e7daaff5d3 | |
mrbesen | e94ac81cdf | |
mrbesen | 66ef79cfec | |
mrbesen | 409eb64537 | |
mrbesen | 24e560d76a | |
MrBesen | 3d3331605c | |
mrbesen | 5604fc76b5 | |
mrbesen | 850482faf8 | |
mrbesen | 02d2c088fd | |
MrBesen | bd711a65ad | |
MrBesen | 61b27ed1a1 | |
mrbesen | ee4b6ece21 | |
mrbesen | c05904cc3e | |
mrbesen | 3999157199 | |
MrBesen | 57891c3761 | |
mrbesen | 7402574e7a | |
mrbesen | 70a6c9f3b1 | |
MrBesen | f17254b886 | |
MrBesen | 693be22a67 | |
mrbesen | fb73f9dda7 | |
mrbesen | 82f39f34ad | |
MrBesen | d31d27dd34 | |
MrBesen | 705bb1a7b9 | |
mrbesen | 1db161ec93 | |
MrBesen | a1b65bf80a | |
MrBesen | 438b0eae71 | |
MrBesen | c59bf28d26 | |
MrBesen | 331a95f44b | |
MrBesen | 9fee0e07d6 | |
mrbesen | 6a12ca01a0 | |
MrBesen | 4eb5c276cc | |
MrBesen | 3267eae9b9 | |
mrbesen | 88bd36b16b | |
mrbesen | e0eed46bfc | |
MrBesen | 069ec5af69 |
|
@ -72,7 +72,7 @@ AVDictionary *format_opts, *codec_opts;
|
|||
|
||||
static FILE *report_file;
|
||||
static int report_file_level = AV_LOG_DEBUG;
|
||||
int hide_banner = 0;
|
||||
int hide_banner = 1;
|
||||
|
||||
enum show_muxdemuxers {
|
||||
SHOW_DEFAULT,
|
||||
|
@ -1603,7 +1603,7 @@ int show_codecs(void *optctx, const char *opt, const char *arg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void print_codecs(int encoder)
|
||||
void print_codecs(int encoder)
|
||||
{
|
||||
const AVCodecDescriptor **codecs;
|
||||
unsigned i, nb_codecs = get_codecs_sorted(&codecs);
|
||||
|
@ -1642,6 +1642,22 @@ static void print_codecs(int encoder)
|
|||
av_free(codecs);
|
||||
}
|
||||
|
||||
void print_codecs_short(int encoder) {
|
||||
const AVCodecDescriptor **codecs;
|
||||
unsigned i, nb_codecs = get_codecs_sorted(&codecs);
|
||||
|
||||
for (i = 0; i < nb_codecs; i++) {
|
||||
const AVCodecDescriptor *desc = codecs[i];
|
||||
const AVCodec *codec = NULL;
|
||||
|
||||
while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
|
||||
printf(" %s", codec->name);
|
||||
}
|
||||
}
|
||||
printf("\n");
|
||||
av_free(codecs);
|
||||
}
|
||||
|
||||
int show_decoders(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
print_codecs(0);
|
||||
|
|
|
@ -51,6 +51,9 @@ extern AVDictionary *swr_opts;
|
|||
extern AVDictionary *format_opts, *codec_opts;
|
||||
extern int hide_banner;
|
||||
|
||||
void print_codecs(int encoder);
|
||||
void print_codecs_short(int encoder);
|
||||
|
||||
/**
|
||||
* Register a program-specific cleanup routine.
|
||||
*/
|
||||
|
|
219
fftools/ffmpeg.c
219
fftools/ffmpeg.c
|
@ -158,6 +158,8 @@ int nb_output_files = 0;
|
|||
FilterGraph **filtergraphs;
|
||||
int nb_filtergraphs;
|
||||
|
||||
int64_t max_frames_hint = -1;
|
||||
|
||||
#if HAVE_TERMIOS_H
|
||||
|
||||
/* init terminal so that we can grab keys */
|
||||
|
@ -1055,6 +1057,13 @@ error:
|
|||
exit_program(1);
|
||||
}
|
||||
|
||||
/* Split an absolute second count into hours/minutes/seconds, in place.
 * On entry *secs holds the total; *mins and *hours are pure outputs. */
static void convertSecondsTotime(int* secs, int* mins, int* hours) {
    int total = *secs;

    *secs  = total % 60;
    total /= 60;
    *mins  = total % 60;
    *hours = total / 60;
}
|
||||
|
||||
static void do_subtitle_out(OutputFile *of,
|
||||
OutputStream *ost,
|
||||
AVSubtitle *sub)
|
||||
|
@ -1556,7 +1565,7 @@ static int reap_filters(int flush)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void print_final_stats(int64_t total_size)
|
||||
static void print_final_stats(int64_t total_size, int processingtime)
|
||||
{
|
||||
uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
|
||||
uint64_t subtitle_size = 0;
|
||||
|
@ -1583,7 +1592,11 @@ static void print_final_stats(int64_t total_size)
|
|||
if (data_size && total_size>0 && total_size >= data_size)
|
||||
percent = 100.0 * (total_size - data_size) / data_size;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
|
||||
int s = processingtime, m, h;
|
||||
convertSecondsTotime(&s, &m, &h);
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "time=%02d:%02d:%02d video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
|
||||
h, m, s,
|
||||
video_size / 1024.0,
|
||||
audio_size / 1024.0,
|
||||
subtitle_size / 1024.0,
|
||||
|
@ -1673,6 +1686,25 @@ static void print_final_stats(int64_t total_size)
|
|||
}
|
||||
}
|
||||
|
||||
/* Format a byte count into buffer as "<n>kB" or "<n>mB" (binary units).
 * Writes "N/A" when size is not positive. The result is always
 * NUL-terminated as long as bufferSize > 0. */
static void print_filesize(int64_t size, char* buffer, uint32_t bufferSize) {
    if (size <= 0) {
        // BUGFIX: the original fell through here, so "N/A" was immediately
        // overwritten (size 0 printed "0kB") and a negative size was
        // right-shifted (implementation-defined). Return early instead,
        // and respect bufferSize rather than memcpy'ing 4 bytes blindly.
        snprintf(buffer, bufferSize, "N/A");
        return;
    }

    int64_t shifted = size >> 20; // size in MiB

    // default to mB
    char unit = 'm';
    int64_t val = shifted;

    // use kB for small files, to keep some precision
    if (shifted < 25) {
        unit = 'k';
        val = size >> 10;
    }

    snprintf(buffer, bufferSize, "%"PRId64"%cB", val, unit);
}
|
||||
|
||||
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
|
||||
{
|
||||
AVBPrint buf, buf_script;
|
||||
|
@ -1705,8 +1737,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
last_time = cur_time;
|
||||
}
|
||||
|
||||
t = (cur_time-timer_start) / 1000000.0;
|
||||
|
||||
t = (cur_time - timer_start) / 1000000.0; //time in seconds, the transcode process is running
|
||||
|
||||
oc = output_files[0]->ctx;
|
||||
|
||||
|
@ -1733,13 +1764,12 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
float fps;
|
||||
|
||||
frame_number = ost->frame_number;
|
||||
fps = t > 1 ? frame_number / t : 0;
|
||||
av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
|
||||
frame_number, fps < 9.95, fps, q);
|
||||
fps = t > 0 ? frame_number / t : 0;
|
||||
av_bprintf(&buf, "frame=%5d fps=%3.*f ",
|
||||
frame_number, fps < 9.95, fps);
|
||||
av_bprintf(&buf_script, "frame=%d\n", frame_number);
|
||||
av_bprintf(&buf_script, "fps=%.2f\n", fps);
|
||||
av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
|
||||
ost->file_index, ost->index, q);
|
||||
|
||||
if (is_last_report)
|
||||
av_bprintf(&buf, "L");
|
||||
if (qp_hist) {
|
||||
|
@ -1800,17 +1830,18 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
|
||||
secs = FFABS(pts) / AV_TIME_BASE;
|
||||
us = FFABS(pts) % AV_TIME_BASE;
|
||||
mins = secs / 60;
|
||||
secs %= 60;
|
||||
hours = mins / 60;
|
||||
mins %= 60;
|
||||
convertSecondsTotime(&secs, &mins, &hours);
|
||||
hours_sign = (pts < 0) ? "-" : "";
|
||||
|
||||
bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
|
||||
speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
|
||||
|
||||
if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
|
||||
else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
|
||||
else {
|
||||
char buff[10];
|
||||
print_filesize(total_size, buff, 10);
|
||||
av_bprintf(&buf, "size=%s time=", buff);
|
||||
}
|
||||
if (pts == AV_NOPTS_VALUE) {
|
||||
av_bprintf(&buf, "N/A ");
|
||||
} else {
|
||||
|
@ -1845,13 +1876,56 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
|
||||
|
||||
if (speed < 0) {
|
||||
av_bprintf(&buf, " speed=N/A");
|
||||
av_bprintf(&buf, " speed=N/A ");
|
||||
av_bprintf(&buf_script, "speed=N/A\n");
|
||||
} else {
|
||||
av_bprintf(&buf, " speed=%4.3gx", speed);
|
||||
av_bprintf(&buf, " speed=%4.3gx ", speed);
|
||||
av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
|
||||
}
|
||||
|
||||
|
||||
//get fps
|
||||
float fps;
|
||||
fps = t > 1 ? frame_number / t : -1;
|
||||
if (fps <= 0 || max_frames_hint < 0) {
|
||||
av_bprintf(&buf, "ETA=N/A");
|
||||
} else {
|
||||
//get remaining frames
|
||||
int64_t remaining_frames = max_frames_hint - frame_number;
|
||||
secs = remaining_frames / fps;
|
||||
|
||||
mins = secs / 60; //secs %= 60 done later
|
||||
hours = mins / 60;
|
||||
mins %= 60;
|
||||
|
||||
//calculate finish date
|
||||
time_t rawtime;
|
||||
struct tm * timeinfo;
|
||||
time(&rawtime);
|
||||
timeinfo = localtime(&rawtime);
|
||||
time_t time = mktime(timeinfo); //current time
|
||||
time_t timef = time + secs; //time finished
|
||||
timeinfo = localtime(&timef);
|
||||
|
||||
convertSecondsTotime(&secs, &mins, &hours);
|
||||
|
||||
char timebuf[20]; //max: 2019-10-10 12:34:00 (18) +1
|
||||
if(timef - time > 86400) //longer than one day?
|
||||
strftime(timebuf, 20, "%F %T", timeinfo); //full information
|
||||
else
|
||||
strftime(timebuf, 20, "%T", timeinfo); //time only
|
||||
|
||||
//estimate size
|
||||
char estsizebuf[20];
|
||||
uint64_t estsize = (total_size / frame_number) * max_frames_hint;
|
||||
if(estsize < total_size) estsize = 0;
|
||||
print_filesize(estsize, estsizebuf, 20);
|
||||
|
||||
float progress = (frame_number / (double) max_frames_hint) * 100;
|
||||
|
||||
av_bprintf(&buf, "Progress=%2.2f%% ETA=%02d:%02d:%02d finish=%s ES=%s", progress, hours, mins, secs, timebuf, estsizebuf);
|
||||
}
|
||||
|
||||
if (print_stats || is_last_report) {
|
||||
const char end = is_last_report ? '\n' : '\r';
|
||||
if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
|
||||
|
@ -1880,7 +1954,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
first_report = 0;
|
||||
|
||||
if (is_last_report)
|
||||
print_final_stats(total_size);
|
||||
print_final_stats(total_size, t);
|
||||
}
|
||||
|
||||
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
|
||||
|
@ -3587,6 +3661,58 @@ static void report_new_stream(int input_index, AVPacket *pkt)
|
|||
file->nb_streams_warn = pkt->stream_index + 1;
|
||||
}
|
||||
|
||||
static int64_t readMeta(const AVDictionary* metadata, const char* name) {
|
||||
AVDictionaryEntry* entry = av_dict_get(metadata, name, NULL, AV_DICT_MATCH_CASE);
|
||||
|
||||
if(!entry) //if exact match does not exists -> try preifx only match
|
||||
entry = av_dict_get(metadata, name, NULL, AV_DICT_IGNORE_SUFFIX);
|
||||
|
||||
if(entry) {
|
||||
char* fm = entry->value;
|
||||
|
||||
//read duration HH:mm:ss.pppppppp
|
||||
uint32_t hour = 0, min = 0, sec = 0, msec = 0;
|
||||
int items = sscanf(fm, "%02d:%02d:%02d.%d", &hour, &min, &sec, &msec);
|
||||
if(items == 4) {//could not parse
|
||||
//to seconds
|
||||
//printf("hms.p: %i, %i, %i, %i\n", hour, min, sec, msec);
|
||||
int64_t num = (((hour * 60) + min) * 60) + sec;
|
||||
return num;
|
||||
}
|
||||
|
||||
int64_t num = atoi(fm); //returns 0 on error
|
||||
|
||||
if(num > 0)
|
||||
return num;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Derive a frame-count estimate from metadata and raise *out to it.
 * Uses the NUMBER_OF_FRAMES tag when present; cross-checks it against
 * DURATION * fps and prefers the duration-based estimate when the two
 * disagree by more than 1% (or when no frame count is tagged at all).
 * *out is only ever increased, never lowered. */
static void try_to_read_maxframes(const AVDictionary* metadata, int64_t* out, float fps) {
    int64_t framecount = readMeta(metadata, "NUMBER_OF_FRAMES");
    int64_t duration = readMeta(metadata, "DURATION");

    // approximate the frame count from the duration
    if (duration > 0) {
        int64_t dframecount = duration * fps;

        if (framecount <= 0) {
            // BUGFIX: the original divided by framecount unconditionally,
            // so a missing NUMBER_OF_FRAMES tag caused a division by zero
            // (it only "worked" via IEEE inf). Handle the case explicitly:
            // the duration estimate is all we have.
            framecount = dframecount;
        } else {
            // if the tagged count and the duration-derived count differ by
            // more than 1%, trust the duration as the source
            double diff = fabs((double)(dframecount - framecount)) / (double)framecount;
            if (diff > 0.01)
                framecount = dframecount;
        }
    }

    if (framecount > *out)
        *out = framecount;
}
|
||||
|
||||
static int transcode_init(void)
|
||||
{
|
||||
int ret = 0, i, j, k;
|
||||
|
@ -3763,6 +3889,31 @@ static int transcode_init(void)
|
|||
return ret;
|
||||
}
|
||||
|
||||
//get the maxframecount (read it from metadata)
|
||||
if(max_frames_hint == -1) {
|
||||
//printf("entering metadata\n");
|
||||
int64_t newmaxframes = 0;
|
||||
for (i = 0; i < nb_input_files; i++) {
|
||||
//printf("read metadata of file %i\n", i);
|
||||
InputFile *ifile = input_files[i];
|
||||
// ifile->duration; //could be used to calculate maxframes
|
||||
AVDictionary* metadict = ifile->ctx->metadata;
|
||||
try_to_read_maxframes(metadict, &newmaxframes, 30); //just asume 30fps
|
||||
|
||||
//read stream metadata
|
||||
for(int j = 0; j < ifile->ctx->nb_streams; ++j) {
|
||||
//printf("read stream metdata of %i:%i\n", i, j);
|
||||
AVStream* stream = ifile->ctx->streams[j];
|
||||
if(stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
try_to_read_maxframes(stream->metadata, &newmaxframes, av_q2d(stream->avg_frame_rate));
|
||||
}
|
||||
}
|
||||
if(newmaxframes > 0)
|
||||
max_frames_hint = newmaxframes;
|
||||
}
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "approximated number of frames: %i\n", max_frames_hint);
|
||||
|
||||
atomic_store(&transcode_init_done, 1);
|
||||
|
||||
return 0;
|
||||
|
@ -3937,6 +4088,9 @@ static int check_keyboard_interaction(int64_t cur_time)
|
|||
if(debug) av_log_set_level(AV_LOG_DEBUG);
|
||||
fprintf(stderr,"debug=%d\n", debug);
|
||||
}
|
||||
if (key == 'p'){
|
||||
return 1;
|
||||
}
|
||||
if (key == '?'){
|
||||
fprintf(stderr, "key function\n"
|
||||
"? show this help\n"
|
||||
|
@ -3948,6 +4102,7 @@ static int check_keyboard_interaction(int64_t cur_time)
|
|||
"h dump packets/hex press to cycle through the 3 states\n"
|
||||
"q quit\n"
|
||||
"s Show QP histogram\n"
|
||||
"p pause / resume\n"
|
||||
);
|
||||
}
|
||||
return 0;
|
||||
|
@ -4660,12 +4815,27 @@ static int transcode(void)
|
|||
#endif
|
||||
|
||||
while (!received_sigterm) {
|
||||
int64_t cur_time= av_gettime_relative();
|
||||
int64_t cur_time = av_gettime_relative();
|
||||
|
||||
/* if 'q' pressed, exits */
|
||||
if (stdin_interaction)
|
||||
if (check_keyboard_interaction(cur_time) < 0)
|
||||
if (stdin_interaction) {
|
||||
int64_t kbinteractionresult = check_keyboard_interaction(cur_time);
|
||||
if (kbinteractionresult < 0)
|
||||
break;
|
||||
if(kbinteractionresult == 1) { //pause
|
||||
fprintf(stderr, "\033[101mPaused\033[0m\r");
|
||||
|
||||
|
||||
//wait for key
|
||||
getchar();
|
||||
int64_t new_cur_time = av_gettime_relative();
|
||||
int64_t paused = new_cur_time - cur_time;
|
||||
cur_time = new_cur_time;
|
||||
timer_start += paused; // shift the transcode start by the time, that was paused -> fix calculation of remaining and fps
|
||||
|
||||
fprintf(stderr, "Unpaused Paused For: %lis \n", (paused / 1000000));
|
||||
}
|
||||
}
|
||||
|
||||
/* check if there's any stream where output is still needed */
|
||||
if (!need_output()) {
|
||||
|
@ -4847,7 +5017,7 @@ int main(int argc, char **argv)
|
|||
|
||||
register_exit(ffmpeg_cleanup);
|
||||
|
||||
setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
|
||||
setvbuf(stderr, NULL,_IONBF,0); /* win32 runtime needs this */
|
||||
|
||||
av_log_set_flags(AV_LOG_SKIP_REPEATED);
|
||||
parse_loglevel(argc, argv, options);
|
||||
|
@ -4871,6 +5041,13 @@ int main(int argc, char **argv)
|
|||
if (ret < 0)
|
||||
exit_program(1);
|
||||
|
||||
//get env
|
||||
{
|
||||
const char* hint;
|
||||
hint = getenv("MAXFRAMES");
|
||||
max_frames_hint = hint != NULL ? strtoll(hint, NULL, 10) : -1;
|
||||
}
|
||||
|
||||
if (nb_output_files <= 0 && nb_input_files == 0) {
|
||||
show_usage();
|
||||
av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
|
||||
|
|
|
@ -21,6 +21,10 @@
|
|||
|
||||
#include <errno.h>
#include <stdint.h>

//mkdir
#include <sys/stat.h>
#include <sys/types.h>

#include "ffmpeg.h"
#include "cmdutils.h"
|
||||
|
||||
|
@ -750,7 +754,18 @@ static const AVCodec *find_codec_or_die(const char *name, enum AVMediaType type,
|
|||
}
|
||||
|
||||
if (!codec) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
|
||||
int c;
|
||||
|
||||
av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Print %ss? [y/n]", codec_string);
|
||||
|
||||
c = getchar();
|
||||
if(c == 'y' || c == 'Y') {
|
||||
printf("\n");
|
||||
print_codecs_short(encoder);
|
||||
}
|
||||
|
||||
exit_program(1);
|
||||
}
|
||||
if (codec->type != type && !recast_media) {
|
||||
|
@ -1587,7 +1602,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
|||
MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
|
||||
ost->disposition = av_strdup(ost->disposition);
|
||||
|
||||
ost->max_muxing_queue_size = 128;
|
||||
ost->max_muxing_queue_size = 1024;
|
||||
MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st);
|
||||
ost->max_muxing_queue_size = FFMIN(ost->max_muxing_queue_size, INT_MAX / sizeof(ost->pkt));
|
||||
ost->max_muxing_queue_size *= sizeof(ost->pkt);
|
||||
|
@ -2256,6 +2271,39 @@ static int init_complex_filters(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// Creates the parent directory of the named file, including any missing
// ancestors (like "mkdir -p"). Returns 0 on success, -1 on error.
// Note: only the parent of `file` is created, never `file` itself; a path
// with no directory component is a success (nothing to create).
static int createFolder(char* file) {
    // locate the last '/' to split off the parent path
    // BUGFIX: the original left lastslash uninitialized when the path
    // contained no '/', then read it (undefined behavior).
    char* lastslash = NULL;
    for (char* it = file; *it; ++it) {
        if (*it == '/')
            lastslash = it;
    }
    if (!lastslash)
        return 0;

    // copy the parent path into its own NUL-terminated buffer
    size_t lenparent = lastslash - file;
    char* parent = malloc(lenparent + 1);
    if (!parent) // BUGFIX: malloc result was never checked
        return -1;
    memcpy(parent, file, lenparent);
    parent[lenparent] = '\0';

    int ret = 0;
    // BUGFIX: EEXIST now counts as success; the original treated an
    // already-existing directory as an error and recursed all the way up
    // into the uninitialized-pointer case above.
    if (mkdir(parent, 0755) != 0 && errno != EEXIST) {
        // direct creation failed: create the ancestors first, then retry
        if (createFolder(parent) == 0)
            ret = (mkdir(parent, 0755) == 0 || errno == EEXIST) ? 0 : -1;
        else
            ret = -1;
    }
    free(parent); // BUGFIX: was leaked on every path
    return ret;
}
|
||||
|
||||
static int open_output_file(OptionsContext *o, const char *filename)
|
||||
{
|
||||
AVFormatContext *oc;
|
||||
|
@ -2683,8 +2731,29 @@ loop_end:
|
|||
if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
|
||||
&oc->interrupt_callback,
|
||||
&of->opts)) < 0) {
|
||||
print_error(filename, err);
|
||||
exit_program(1);
|
||||
|
||||
if(err == -ENOENT) {
|
||||
//file does not exists
|
||||
av_log(NULL, AV_LOG_WARNING, "%s: does not exists - create parent folder and try again [N/y]?", filename);
|
||||
char c = getchar();
|
||||
printf("\n");
|
||||
if(c == 'y' || c == 'Y') {
|
||||
if(createFolder(filename) == -1) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not create Directories\n");
|
||||
exit_program(1);
|
||||
}
|
||||
//try to init stream again
|
||||
if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
|
||||
&oc->interrupt_callback,
|
||||
&of->opts)) < 0) {
|
||||
print_error(filename, err);
|
||||
exit_program(1);
|
||||
}
|
||||
} else {
|
||||
print_error(filename, err);
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (strcmp(oc->oformat->name, "image2")==0 && !av_filename_number_test(filename))
|
||||
assert_file_overwrite(filename);
|
||||
|
|
|
@ -211,6 +211,7 @@ OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
|
|||
OBJS-$(CONFIG_COLORCONTRAST_FILTER) += vf_colorcontrast.o
|
||||
OBJS-$(CONFIG_COLORCORRECT_FILTER) += vf_colorcorrect.o
|
||||
OBJS-$(CONFIG_COLORIZE_FILTER) += vf_colorize.o
|
||||
OBJS-$(CONFIG_COLORIZELSD_FILTER) += vf_colorizelsd.o
|
||||
OBJS-$(CONFIG_COLORKEY_FILTER) += vf_colorkey.o
|
||||
OBJS-$(CONFIG_COLORKEY_OPENCL_FILTER) += vf_colorkey_opencl.o opencl.o \
|
||||
opencl/colorkey.o
|
||||
|
@ -348,6 +349,7 @@ OBJS-$(CONFIG_MASKEDMERGE_FILTER) += vf_maskedmerge.o framesync.o
|
|||
OBJS-$(CONFIG_MASKEDMIN_FILTER) += vf_maskedminmax.o framesync.o
|
||||
OBJS-$(CONFIG_MASKEDTHRESHOLD_FILTER) += vf_maskedthreshold.o framesync.o
|
||||
OBJS-$(CONFIG_MASKFUN_FILTER) += vf_maskfun.o
|
||||
OBJS-$(CONFIG_MASKFUN_FILTER) += vf_matrix.o
|
||||
OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
|
||||
OBJS-$(CONFIG_MEDIAN_FILTER) += vf_median.o
|
||||
OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
|
||||
|
|
|
@ -199,6 +199,7 @@ extern const AVFilter ff_vf_colorchannelmixer;
|
|||
extern const AVFilter ff_vf_colorcontrast;
|
||||
extern const AVFilter ff_vf_colorcorrect;
|
||||
extern const AVFilter ff_vf_colorize;
|
||||
extern const AVFilter ff_vf_colorizelsd;
|
||||
extern const AVFilter ff_vf_colorkey;
|
||||
extern const AVFilter ff_vf_colorkey_opencl;
|
||||
extern const AVFilter ff_vf_colorhold;
|
||||
|
@ -332,6 +333,7 @@ extern const AVFilter ff_vf_maskedmerge;
|
|||
extern const AVFilter ff_vf_maskedmin;
|
||||
extern const AVFilter ff_vf_maskedthreshold;
|
||||
extern const AVFilter ff_vf_maskfun;
|
||||
extern const AVFilter ff_vf_matrix;
|
||||
extern const AVFilter ff_vf_mcdeint;
|
||||
extern const AVFilter ff_vf_median;
|
||||
extern const AVFilter ff_vf_mergeplanes;
|
||||
|
|
|
@ -0,0 +1,262 @@
|
|||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
extern "C" {
|
||||
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "avfilter.h"
|
||||
#include "formats.h"
|
||||
#include "internal.h"
|
||||
#include "video.h"
|
||||
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
|
||||
typedef struct ColorizeLSDContext {
|
||||
const AVClass *classs;
|
||||
|
||||
// user input
|
||||
float speed;
|
||||
float hue;
|
||||
float saturation;
|
||||
float lightness;
|
||||
float mix;
|
||||
float mixcolor;
|
||||
|
||||
int depth; // bit depth of planes
|
||||
int c[3]; // yuv of current overlay color (recalculated every frame)
|
||||
|
||||
// size of input/output planes
|
||||
int planewidth[4];
|
||||
int planeheight[4];
|
||||
|
||||
// ptr to multithreaded functions
|
||||
// one function for the y plane one for uv
|
||||
int (*do_plane_slice[2])(AVFilterContext *s, void *arg,
|
||||
int jobnr, int nb_jobs, int plane);
|
||||
} ColorizeLSDContext;
|
||||
|
||||
} // extern c
|
||||
|
||||
/* Linear interpolation: f == 0 yields v0, f == 1 yields v1. */
static inline float lerpf(float v0, float v1, float f)
{
    const float delta = v1 - v0;
    return v0 + delta * f;
}
||||
|
||||
|
||||
// luminance slices
|
||||
template<typename T, bool color>
|
||||
static int colorize_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs, int plane)
|
||||
{
|
||||
ColorizeLSDContext *s = (ColorizeLSDContext *) ctx->priv;
|
||||
AVFrame *frame = (AVFrame *) arg;
|
||||
const int width = s->planewidth[color]; // plane 1 & 2 are the same size
|
||||
const int height = s->planeheight[color];
|
||||
const int slice_start = (height * jobnr) / nb_jobs;
|
||||
const int slice_end = (height * (jobnr + 1)) / nb_jobs;
|
||||
|
||||
const float mix = color ? s->mixcolor : s->mix;
|
||||
|
||||
const int linesize = frame->linesize[plane] / sizeof(T);
|
||||
|
||||
T *ptr = (T*) frame->data[plane] + slice_start * linesize;
|
||||
|
||||
const int c = s->c[plane]; // planes color component of target color
|
||||
|
||||
for (int y = slice_start; y < slice_end; y++) {
|
||||
for (int x = 0; x < width; x++) {
|
||||
ptr[x] = lerpf(c, ptr[x], mix);
|
||||
}
|
||||
|
||||
ptr += linesize;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
|
||||
static int do_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
|
||||
{
|
||||
ColorizeLSDContext *s = (ColorizeLSDContext *) ctx->priv;
|
||||
|
||||
s->do_plane_slice[0](ctx, arg, jobnr, nb_jobs, 0); // y plane
|
||||
s->do_plane_slice[1](ctx, arg, jobnr, nb_jobs, 1); // u plane
|
||||
s->do_plane_slice[1](ctx, arg, jobnr, nb_jobs, 2); // v plane
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Helper for HSL->RGB: evaluate one channel of the piecewise hue ramp.
 * t is a hue offset in turns; p and q are the precomputed lightness bounds. */
static float hue2rgb(float p, float q, float t)
{
    // wrap t into [0, 1]
    if (t < 0.f)
        t += 1.f;
    else if (t > 1.f)
        t -= 1.f;

    if (t < 1.f/6.f)
        return p + (q - p) * 6.f * t;             // rising edge
    if (t < 1.f/2.f)
        return q;                                 // plateau
    if (t < 2.f/3.f)
        return p + (q - p) * (2.f/3.f - t) * 6.f; // falling edge
    return p;                                     // floor
}
|
||||
|
||||
/* Convert HSL (h in degrees, s and l in [0,1]) to RGB in [0,1]. */
static void hsl2rgb(float h, float s, float l, float *r, float *g, float *b)
{
    const float hue = h / 360.f; // normalize degrees to turns

    if (s == 0.f) {
        // achromatic: every channel equals the lightness
        *r = l;
        *g = l;
        *b = l;
        return;
    }

    float q;
    if (l < 0.5f)
        q = l * (1.f + s);
    else
        q = l + s - l * s;
    const float p = 2.f * l - q;

    *r = hue2rgb(p, q, hue + 1.f / 3.f);
    *g = hue2rgb(p, q, hue);
    *b = hue2rgb(p, q, hue - 1.f / 3.f);
}
|
||||
|
||||
/* Convert an RGB triple in [0,1] to integer YUV scaled for the given bit
 * depth (BT.709 coefficients, limited/TV range: 219 luma, 224 chroma). */
static void rgb2yuv(float r, float g, float b, int *y, int *u, int *v, int depth)
{
    const int scale = (1 << depth) - 1;

    const double luma   =  (0.21260*219.0/255.0) * r + (0.71520*219.0/255.0) * g + (0.07220*219.0/255.0) * b;
    const double chromb = -(0.11457*224.0/255.0) * r - (0.38543*224.0/255.0) * g + (0.50000*224.0/255.0) * b + 0.5;
    const double chromr =  (0.50000*224.0/255.0) * r - (0.45415*224.0/255.0) * g - (0.04585*224.0/255.0) * b + 0.5;

    *y = luma * scale;
    *u = chromb * scale;
    *v = chromr * scale;
}
|
||||
|
||||
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
|
||||
{
|
||||
double timebase = av_q2d(inlink->time_base);
|
||||
AVFilterContext *ctx = (AVFilterContext *) inlink->dst;
|
||||
ColorizeLSDContext *s = (ColorizeLSDContext *) ctx->priv;
|
||||
float c[3]; // temp rgb
|
||||
|
||||
double time = timebase * frame->pts;
|
||||
uint32_t pos = (uint32_t) ((time * s->speed * 360) + s->hue) % 360;
|
||||
|
||||
hsl2rgb(pos, s->saturation, s->lightness, &c[0], &c[1], &c[2]);
|
||||
rgb2yuv(c[0], c[1], c[2], &s->c[0], &s->c[1], &s->c[2], s->depth);
|
||||
|
||||
ff_filter_execute(ctx, do_slice, frame, NULL,
|
||||
FFMIN(s->planeheight[1], ff_filter_get_nb_threads(ctx)));
|
||||
|
||||
return ff_filter_frame(ctx->outputs[0], frame);
|
||||
}
|
||||
|
||||
static const enum AVPixelFormat pixel_fmts[] = {
|
||||
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
|
||||
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
|
||||
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
|
||||
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
|
||||
AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
|
||||
AV_PIX_FMT_YUVJ411P,
|
||||
AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
|
||||
AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
|
||||
AV_PIX_FMT_YUV440P10,
|
||||
AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
|
||||
AV_PIX_FMT_YUV440P12,
|
||||
AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
|
||||
AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
|
||||
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
|
||||
AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
|
||||
AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
|
||||
AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
|
||||
AV_PIX_FMT_NONE
|
||||
};
|
||||
|
||||
static av_cold int config_input(AVFilterLink *inlink)
|
||||
{
|
||||
AVFilterContext *ctx = (AVFilterContext *) inlink->dst;
|
||||
ColorizeLSDContext *s = (ColorizeLSDContext *) ctx->priv;
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get((AVPixelFormat) inlink->format);
|
||||
int depth;
|
||||
|
||||
s->depth = depth = desc->comp[0].depth;
|
||||
|
||||
s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
|
||||
s->planewidth[0] = s->planewidth[3] = inlink->w;
|
||||
s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
|
||||
s->planeheight[0] = s->planeheight[3] = inlink->h;
|
||||
|
||||
s->do_plane_slice[0] = depth <= 8 ? colorize_slice<uint8_t, false> : colorize_slice<uint16_t, false>;
|
||||
s->do_plane_slice[1] = depth <= 8 ? colorize_slice<uint8_t, true> : colorize_slice<uint16_t, true>;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const AVFilterPad colorizelsd_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.flags = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
},
|
||||
};
|
||||
|
||||
static const AVFilterPad colorizelsd_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
},
|
||||
};
|
||||
|
||||
#define OFFSET(x) offsetof(ColorizeLSDContext, x)
|
||||
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
|
||||
|
||||
static const AVOption colorizelsd_options[] = {
|
||||
{ "speed", "set the rotation speed in r/s", OFFSET(speed), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0.0000001, 1000, VF},
|
||||
{ "hue", "set the start hue", OFFSET(hue), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 360, VF },
|
||||
{ "saturation", "set the start saturation", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl=0.5},0, 1, VF },
|
||||
{ "lightness", "set the start lightness", OFFSET(lightness), AV_OPT_TYPE_FLOAT, {.dbl=0.5},0, 1, VF },
|
||||
{ "mix", "set the mix of source lightness", OFFSET(mix), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, VF },
|
||||
{ "mixcolor", "set the mix of source color", OFFSET(mixcolor), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0, 1, VF },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(colorizelsd);
|
||||
|
||||
extern "C" const AVFilter ff_vf_colorizelsd = {
|
||||
.name = "colorizelsd",
|
||||
.description = NULL_IF_CONFIG_SMALL("Overlay a color change on the video stream."),
|
||||
// FILTER_INPUTS(colorizelsd_inputs),
|
||||
// FILTER_OUTPUTS(colorizelsd_outputs),
|
||||
.inputs = colorizelsd_inputs,
|
||||
.outputs = colorizelsd_outputs,
|
||||
.priv_class = &colorizelsd_class,
|
||||
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
|
||||
.nb_inputs = 1,
|
||||
.nb_outputs = 1,
|
||||
.formats_state = FF_FILTER_FORMATS_PIXFMT_LIST,
|
||||
|
||||
// preinit
|
||||
// init
|
||||
// init_dict
|
||||
// uninit
|
||||
|
||||
.formats = { .pixels_list = pixel_fmts },
|
||||
.priv_size = sizeof(ColorizeLSDContext),
|
||||
|
||||
// FILTER_PIXFMTS_ARRAY(pixel_fmts),
|
||||
.process_command = NULL, // ff_filter_process_command
|
||||
|
||||
// activate
|
||||
};
|
||||
} // extern c
|
|
@ -0,0 +1,206 @@
|
|||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
extern "C" {
|
||||
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "avfilter.h"
|
||||
#include "formats.h"
|
||||
#include "internal.h"
|
||||
#include "video.h"
|
||||
|
||||
}
|
||||
|
||||
#include <iostream>
|
||||
|
||||
extern "C" {
|
||||
|
||||
// Private state for the "matrix" filter: tiles the input image into a
// w*h grid of shrunken copies by resampling each plane.
typedef struct MatrixContext {
    // AVClass pointer for AVOption handling (conventionally the first
    // field of an FFmpeg filter priv struct); spelled "classs" because
    // this file is compiled as C++, where "class" is a keyword.
    const AVClass *classs;

    // user input (AVOptions "w"/"h"): tiling factor in each direction
    unsigned int w;
    unsigned int h;

    int depth; // bit depth of planes

    // size of input planes (index 0 = luma, 1/2 = chroma, 3 = alpha)
    int planewidth[4];
    int planeheight[4];

    // ptr to the plane-processing function, selected in config_input by
    // bit depth (uint8_t vs uint16_t samples); the same function is used
    // for every plane. arg is the AVFrame being filtered.
    int (*do_plane)(AVFilterContext *s, void *arg, int plane);
} MatrixContext;
|
||||
|
||||
} // extern c
|
||||
|
||||
|
||||
// luminance slices
|
||||
|
||||
template<typename T>
|
||||
static inline T getPxl(T* planeData, uint32_t linesize, uint32_t h, uint32_t x, uint32_t y) {
|
||||
uint32_t yc = (y % h);
|
||||
uint32_t xc = (x % linesize);
|
||||
return (planeData[yc * linesize + xc]);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static int matrix_processplane(AVFilterContext *ctx, void *arg, int plane)
|
||||
{
|
||||
MatrixContext *s = (MatrixContext *) ctx->priv;
|
||||
AVFrame *frame = (AVFrame *) arg;
|
||||
const int width = s->planewidth[plane];
|
||||
const int height = s->planeheight[plane];
|
||||
|
||||
const unsigned int w = s->w;
|
||||
const unsigned int h = s->h;
|
||||
|
||||
const int linesize = frame->linesize[plane] / sizeof(T);
|
||||
|
||||
T *ptr = (T*) frame->data[plane];
|
||||
|
||||
// copy frame
|
||||
T* origframe = new T[width * height];
|
||||
memcpy(origframe, ptr, width * height * sizeof(T));
|
||||
|
||||
// apply mosaik
|
||||
for (int y = 0; y < height; y++) {
|
||||
for (int x = 0; x < width; x++) {
|
||||
ptr[x] = getPxl(origframe, linesize, height, x * w, y * h);
|
||||
}
|
||||
|
||||
ptr += linesize;
|
||||
}
|
||||
|
||||
delete[] origframe;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
|
||||
// Input-pad callback: apply the mosaic to each plane of the (writable)
// frame in place, then pass it downstream.
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = (AVFilterContext *) inlink->dst;
    MatrixContext *s = (MatrixContext *) ctx->priv;

    // Run the depth-specific plane function on Y, U and V in turn.
    for (int plane = 0; plane < 3; plane++)
        s->do_plane(ctx, frame, plane);

    return ff_filter_frame(ctx->outputs[0], frame);
}
|
||||
|
||||
// Accepted input formats: planar YUV (and YUVA) from 8 to 16 bits per
// sample. The framework inserts a conversion for anything else.
static const enum AVPixelFormat pixel_fmts[] = {
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV440P10,
    AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
    AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
    AV_PIX_FMT_NONE
};
|
||||
|
||||
// Called once the input link is negotiated: record per-plane dimensions
// and pick the sample-type specialization for the configured bit depth.
static av_cold int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = (AVFilterContext *) inlink->dst;
    MatrixContext *s = (MatrixContext *) ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get((AVPixelFormat) inlink->format);

    s->depth = desc->comp[0].depth;

    // Luma (0) and alpha (3) planes are full size; chroma planes (1, 2)
    // are reduced by the format's subsampling factors.
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);

    // <= 8 bits fits in uint8_t samples, everything else uses uint16_t.
    if (s->depth <= 8)
        s->do_plane = matrix_processplane<uint8_t>;
    else
        s->do_plane = matrix_processplane<uint16_t>;

    return 0;
}
|
||||
|
||||
// Single video input. NEEDS_WRITABLE because filter_frame modifies the
// frame's planes in place.
static const AVFilterPad matrix_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .flags = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};
|
||||
|
||||
// Single video output; frames are pushed from filter_frame on the input pad.
static const AVFilterPad matrix_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};
|
||||
|
||||
#define OFFSET(x) offsetof(MatrixContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

// User options: the tiling factor in each direction (output shows the
// image w times horizontally and h times vertically).
// NOTE(review): MatrixContext.w/h are unsigned int while AV_OPT_TYPE_INT
// stores a plain int — same size on common ABIs, but confirm intent.
// NOTE(review): VF includes AV_OPT_FLAG_RUNTIME_PARAM, yet the filter sets
// .process_command = NULL, so runtime option changes presumably never
// reach the filter — verify.
static const AVOption matrix_options[] = {
    { "w", "width", OFFSET(w), AV_OPT_TYPE_INT, {.i64=2}, 1, 1000, VF},
    { "h", "height", OFFSET(h), AV_OPT_TYPE_INT, {.i64=2}, 1, 1000, VF},
    { NULL }
};

AVFILTER_DEFINE_CLASS(matrix);
|
||||
|
||||
// Public definition of the "matrix" filter, written against an older
// AVFilter ABI (explicit .inputs/.outputs/.formats fields); the newer
// FILTER_INPUTS / FILTER_OUTPUTS / FILTER_PIXFMTS_ARRAY macro calls are
// kept commented out for reference.
extern "C" const AVFilter ff_vf_matrix = {
    .name = "matrix",
    .description = NULL_IF_CONFIG_SMALL("Multiply a video into a Matrix"),
    // FILTER_INPUTS(matrix_inputs),
    // FILTER_OUTPUTS(matrix_outputs),
    .inputs = matrix_inputs,
    .outputs = matrix_outputs,
    .priv_class = &matrix_class,
    // SLICE_THREADS deliberately disabled (left commented out below).
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC , // | AVFILTER_FLAG_SLICE_THREADS,
    .nb_inputs = 1,
    .nb_outputs = 1,
    .formats_state = FF_FILTER_FORMATS_PIXFMT_LIST,

    // preinit
    // init
    // init_dict
    // uninit — no per-instance allocations outlive a frame, so none needed

    .formats = { .pixels_list = pixel_fmts },
    .priv_size = sizeof(MatrixContext),

    // FILTER_PIXFMTS_ARRAY(pixel_fmts),
    // NOTE(review): options are flagged AV_OPT_FLAG_RUNTIME_PARAM but no
    // process_command handler is installed — runtime changes presumably
    // do nothing; confirm.
    .process_command = NULL, // ff_filter_process_command

    // activate
};
|
||||
|
||||
} // extern c
|
|
@ -0,0 +1,14 @@
|
|||
notes (line numbers of local modifications):

ffmpeg.c:
  key scanning: 3905
  main loop: 4656
  transcode_init: 3667

  pause added: 4685
  print-size adjustment: 1773

ffmpeg_opt.c:
  max_muxing_queue_size modification: 1543

cmdutils.c:
  always-hide-banner modification: 77
|
|
@ -0,0 +1,4 @@
|
|||
sudo apt install libopenmpt-dev libopenjp2-7-dev librsvg2-dev librubberband-dev libsnappy-dev libspeex-dev libtheora-dev libtwolame-dev libzvbi-dev libavdevice-dev nasm libladspa-ocaml-dev libgnutls28-dev libfrei0r-ocaml-dev libchromaprint-dev libass-dev libbluray-dev libbs2b-dev libcaca-dev libdrm-dev libgme-dev libgsm1-dev libmp3lame-dev libopus-dev libpulse-dev libwebp-dev libvorbis-dev libxvidcore-dev libsoxr-dev libshine-dev libssh-dev libvpx-dev libwavpack-dev libx264-dev libx265-dev libsdl2-dev libdc1394-22-dev libxcb-shape0-dev libxcb-xfixes0-dev
|
||||
|
||||
./configure --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --enable-avresample --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzvbi --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-chromaprint --enable-frei0r --enable-libx264
|
||||
|
Loading…
Reference in New Issue